Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)
Compare commits: 19 commits, v6.0.5-rc. ... hashtree-i
Commits:
3b00cacef4
6f89cb5330
e315f78b97
536df4b682
1b98d8c202
11ca766ed6
cd6cc76d58
fc4a1469f0
f3dc4c283e
6ddf271688
af7afba26e
b740a4ff83
385c2224e8
04b39d1a4d
4c40caf7fd
bc209cadab
856742ff68
abe16a9cb4
77958022e7
BUILD.bazel (31 lines changed)
@@ -1,8 +1,9 @@
 load("@prysm//tools/go:def.bzl", "go_library")
 load("@bazel_gazelle//:def.bzl", "gazelle")
 load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
 load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
 load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
-load("@io_bazel_rules_go//go:def.bzl", "nogo")
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "nogo")
 load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
 load("@prysm//tools/nogo_config:def.bzl", "nogo_config_exclude")

@@ -282,3 +283,31 @@ sh_binary(
     srcs = ["prysm.sh"],
     visibility = ["//visibility:public"],
 )
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "compare_states.go",
+        "reproduce_bug.go",
+        "verify_fix.go",
+    ],
+    importpath = "github.com/OffchainLabs/prysm/v6",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/transition:go_default_library",
+        "//beacon-chain/state:go_default_library",
+        "//beacon-chain/state/state-native:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//proto/prysm/v1alpha1:go_default_library",
+        "//time/slots:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+        "@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "v6",
+    embed = [":go_default_library"],
+    visibility = ["//visibility:public"],
+)
@@ -415,6 +415,10 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")

 bls_dependencies()

+load("@prysm//third_party/hashtree:hashtree.bzl", "hashtree_dependencies")
+
+hashtree_dependencies()
+
 load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")

 e2e_deps()
@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
-    srcs = ["common.go"],
+    srcs = [
+        "common.go",
+        "header.go",
+    ],
     importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
     visibility = ["//visibility:public"],
-    deps = ["//consensus-types/primitives:go_default_library"],
+    deps = [
+        "//consensus-types/primitives:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
 )

 go_test(
     name = "go_default_test",
-    srcs = ["common_test.go"],
+    srcs = [
+        "common_test.go",
+        "header_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//consensus-types/primitives:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
     ],
 )
api/apiutil/header.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package apiutil

import (
	"mime"
	"sort"
	"strconv"
	"strings"

	log "github.com/sirupsen/logrus"
)

type mediaRange struct {
	mt   string  // canonicalised media type, e.g. "application/json"
	q    float64 // quality factor (0-1)
	raw  string  // original string, useful for logging/debugging
	spec int     // 2=exact, 1=type/*, 0=*/*
}

func parseMediaRange(field string) (mediaRange, bool) {
	field = strings.TrimSpace(field)

	mt, params, err := mime.ParseMediaType(field)
	if err != nil {
		log.WithError(err).Debug("Failed to parse header field")
		return mediaRange{}, false
	}

	r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}

	if qs, ok := params["q"]; ok {
		v, err := strconv.ParseFloat(qs, 64)
		if err != nil || v < 0 || v > 1 {
			log.WithField("q", qs).Debug("Invalid quality factor (0-1)")
			return mediaRange{}, false // skip invalid entry
		}
		r.q = v
	}

	switch {
	case mt == "*/*":
		r.spec = 0
	case strings.HasSuffix(mt, "/*"):
		r.spec = 1
	}
	return r, true
}

func hasExplicitQ(r mediaRange) bool {
	return strings.Contains(strings.ToLower(r.raw), ";q=")
}

// ParseAccept returns media ranges sorted by q (desc) then specificity.
func ParseAccept(header string) []mediaRange {
	if header == "" {
		return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
	}

	var out []mediaRange
	for _, field := range strings.Split(header, ",") {
		if r, ok := parseMediaRange(field); ok {
			out = append(out, r)
		}
	}

	sort.SliceStable(out, func(i, j int) bool {
		ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
		if ei != ej {
			return ei // explicit beats implicit
		}
		if out[i].q != out[j].q {
			return out[i].q > out[j].q
		}
		return out[i].spec > out[j].spec
	})
	return out
}

// Matches reports whether the content type is acceptable per the header.
func Matches(header, ct string) bool {
	for _, r := range ParseAccept(header) {
		switch {
		case r.q == 0:
			continue
		case r.mt == "*/*":
			return true
		case strings.HasSuffix(r.mt, "/*"):
			if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
				return true
			}
		case r.mt == ct:
			return true
		}
	}
	return false
}

// Negotiate selects the best server type according to the header.
// It returns the chosen type and true, or "", false when nothing matches.
func Negotiate(header string, serverTypes []string) (string, bool) {
	for _, r := range ParseAccept(header) {
		if r.q == 0 {
			continue
		}
		for _, s := range serverTypes {
			if Matches(r.mt, s) {
				return s, true
			}
		}
	}
	return "", false
}

// PrimaryAcceptMatches reports whether the first acceptable (q > 0) media range
// in the header matches the produced content type.
func PrimaryAcceptMatches(header, produced string) bool {
	for _, r := range ParseAccept(header) {
		if r.q == 0 {
			continue // explicitly unacceptable, skip
		}
		return Matches(r.mt, produced)
	}
	return false
}
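The negotiation flow above is easiest to see end to end. A minimal usage sketch (the handler name and the offered media types here are illustrative, not part of the diff; only apiutil.Negotiate comes from the new file):

package main

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

// serveState is a hypothetical handler showing the intended use of Negotiate.
func serveState(w http.ResponseWriter, r *http.Request) {
	// The server can produce JSON or SSZ; order matters when the client sends */*.
	offered := []string{"application/json", "application/octet-stream"}

	ct, ok := apiutil.Negotiate(r.Header.Get("Accept"), offered)
	if !ok {
		http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
		return
	}
	w.Header().Set("Content-Type", ct)
	// e.g. Accept "application/xml;q=0.9,*/*;q=0.1" selects "application/json":
	// neither offered type matches application/xml, so the */* range wins and
	// the first offered server type is chosen.
}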
api/apiutil/header_test.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package apiutil

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestParseAccept(t *testing.T) {
	type want struct {
		mt   string
		q    float64
		spec int
	}

	cases := []struct {
		name   string
		header string
		want   []want
	}{
		{
			name:   "empty header becomes */*;q=1",
			header: "",
			want:   []want{{mt: "*/*", q: 1, spec: 0}},
		},
		{
			name:   "quality ordering then specificity",
			header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
			want: []want{
				{mt: "application/xml", q: 0.5, spec: 2},
				{mt: "text/*", q: 0.5, spec: 1},
				{mt: "application/json", q: 0.2, spec: 2},
				{mt: "*/*", q: 0.1, spec: 0},
			},
		},
		{
			name:   "invalid pieces are skipped",
			header: "text/plain; q=boom, application/json",
			want:   []want{{mt: "application/json", q: 1, spec: 2}},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := ParseAccept(tc.header)
			gotProjected := make([]want, len(got))
			for i, g := range got {
				gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
			}
			require.DeepEqual(t, gotProjected, tc.want)
		})
	}
}

func TestMatches(t *testing.T) {
	cases := []struct {
		name    string
		accept  string
		ct      string
		matches bool
	}{
		{"exact match", "application/json", "application/json", true},
		{"type wildcard", "application/*;q=0.8", "application/xml", true},
		{"global wildcard", "*/*;q=0.1", "image/png", true},
		{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
		{"no match", "image/png", "application/json", false},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := Matches(tc.accept, tc.ct)
			require.Equal(t, tc.matches, got)
		})
	}
}

func TestNegotiate(t *testing.T) {
	cases := []struct {
		name        string
		accept      string
		serverTypes []string
		wantType    string
		ok          bool
	}{
		{
			name:        "highest quality wins",
			accept:      "application/json;q=0.8,application/xml;q=0.9",
			serverTypes: []string{"application/json", "application/xml"},
			wantType:    "application/xml",
			ok:          true,
		},
		{
			name:        "wildcard matches first server type",
			accept:      "*/*;q=0.5",
			serverTypes: []string{"application/octet-stream", "application/json"},
			wantType:    "application/octet-stream",
			ok:          true,
		},
		{
			name:        "no acceptable type",
			accept:      "image/png",
			serverTypes: []string{"application/json"},
			wantType:    "",
			ok:          false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, ok := Negotiate(tc.accept, tc.serverTypes)
			require.Equal(t, tc.ok, ok)
			require.Equal(t, tc.wantType, got)
		})
	}
}

func TestPrimaryAcceptMatches(t *testing.T) {
	tests := []struct {
		name     string
		accept   string
		produced string
		expect   bool
	}{
		{
			name:     "prefers json",
			accept:   "application/json;q=0.9,application/xml",
			produced: "application/json",
			expect:   true,
		},
		{
			name:     "wildcard application beats other wildcard",
			accept:   "application/*;q=0.2,*/*;q=0.1",
			produced: "application/xml",
			expect:   true,
		},
		{
			name:     "json wins",
			accept:   "application/xml;q=0.8,application/json;q=0.9",
			produced: "application/json",
			expect:   true,
		},
		{
			name:     "json loses",
			accept:   "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
			produced: "application/json",
			expect:   false,
		},
		{
			name:     "json wins with non q option",
			accept:   "application/xml;q=0.8,image/png,application/json;q=0.9",
			produced: "application/json",
			expect:   true,
		},
		{
			name:     "json not primary",
			accept:   "image/png,application/json",
			produced: "application/json",
			expect:   false,
		},
		{
			name:     "absent header",
			accept:   "",
			produced: "text/plain",
			expect:   true,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := PrimaryAcceptMatches(tc.accept, tc.produced)
			require.Equal(t, got, tc.expect)
		})
	}
}
@@ -10,6 +10,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//api:go_default_library",
+        "//api/apiutil:go_default_library",
         "@com_github_rs_cors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
     ],
@@ -7,6 +7,7 @@ import (
 	"strings"

 	"github.com/OffchainLabs/prysm/v6/api"
+	"github.com/OffchainLabs/prysm/v6/api/apiutil"
 	"github.com/rs/cors"
 	log "github.com/sirupsen/logrus"
 )

@@ -74,42 +75,10 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
 func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			acceptHeader := r.Header.Get("Accept")
-			// header is optional and should skip if not provided
-			if acceptHeader == "" {
-				next.ServeHTTP(w, r)
+			if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
+				http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
+				return
 			}
-
-			accepted := false
-			acceptTypes := strings.Split(acceptHeader, ",")
-			// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
-			for _, acceptType := range acceptTypes {
-				acceptType = strings.TrimSpace(acceptType)
-				if acceptType == "*/*" {
-					accepted = true
-					break
-				}
-				for _, serverAcceptedType := range serverAcceptedTypes {
-					if strings.HasPrefix(acceptType, serverAcceptedType) {
-						accepted = true
-						break
-					}
-					if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
-						accepted = true
-						break
-					}
-				}
-				if accepted {
-					break
-				}
-			}
-
-			if !accepted {
-				http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
-				return
-			}
-
 			next.ServeHTTP(w, r)
 		})
 	}
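For context, a sketch of how a middleware of this shape is typically wired around a handler (the mux and route below are illustrative; the real registration appears later in this diff under beaconEndpoints):

// Illustrative wiring, assuming Middleware is func(http.Handler) http.Handler.
func wireRoute(mux *http.ServeMux, handler http.Handler) {
	accepted := []string{"application/json", "application/octet-stream"}
	mw := AcceptHeaderHandler(accepted)
	// Requests whose Accept header cannot be negotiated against `accepted`
	// are rejected with 406 before `handler` ever runs.
	mux.Handle("/eth/v1/beacon/states/{state_id}/proposer_lookahead", mw(handler))
}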
@@ -283,3 +283,10 @@ type GetPendingPartialWithdrawalsResponse struct {
 	Finalized bool                        `json:"finalized"`
 	Data      []*PendingPartialWithdrawal `json:"data"`
 }
+
+type GetProposerLookaheadResponse struct {
+	Version             string   `json:"version"`
+	ExecutionOptimistic bool     `json:"execution_optimistic"`
+	Finalized           bool     `json:"finalized"`
+	Data                []string `json:"data"` // validator indexes
+}
@@ -27,6 +27,8 @@ type Identity struct {
 type Metadata struct {
 	SeqNumber string `json:"seq_number"`
 	Attnets   string `json:"attnets"`
+	Syncnets  string `json:"syncnets,omitempty"`
+	Cgc       string `json:"custody_group_count,omitempty"`
 }

 type GetPeerResponse struct {
@@ -11,7 +11,7 @@ import (
 )

 var (
-	// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
+	// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
	//go:embed trusted_setup_4096.json
	embeddedTrustedSetup []byte // 1.2Mb
	kzgContext           *GoKZG.Context
@@ -666,10 +666,9 @@ func (s *Service) areDataColumnsAvailable(
 	root [fieldparams.RootLength]byte,
 	block interfaces.ReadOnlyBeaconBlock,
 ) error {
-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.
 	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
 	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

 	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
 		return nil
 	}
@@ -177,7 +177,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
 	for _, a := range atts {
 		// Based on the spec, don't process the attestation until the subsequent slot.
 		// This delays consideration in the fork choice until their slot is in the past.
-		// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
+		// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
 		nextSlot := a.GetData().Slot + 1
 		if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
 			continue
@@ -15,7 +15,6 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
 }

 // ReceiveDataColumn receives a single data column.
 // (It is only a wrapper around ReceiveDataColumns.)
 func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
 	if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
 		return errors.Wrap(err, "save data column sidecars")
@@ -206,9 +206,9 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Checkpoint, error) {
 // MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
 // relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
 // value defined:
-// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration
 // MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
-// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
+// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
 func MinEpochsForBlockRequests() primitives.Epoch {
 	return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
 		primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
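The 33024 constant quoted in the comment (and asserted in the test below) follows directly from this arithmetic with mainnet config values:

// Worked example, using phase0 mainnet config values:
//   MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs
//   CHURN_LIMIT_QUOTIENT                = 65536
// MinEpochsForBlockRequests = 256 + 65536/2 = 256 + 32768 = 33024 epochs
//   33024 epochs * 32 slots * 12 s per slot ~= 147 days, the "~5 months" above.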
@@ -292,7 +292,7 @@ func TestMinEpochsForBlockRequests(t *testing.T) {
 	params.SetActiveTestCleanup(t, params.MainnetConfig())
 	var expected primitives.Epoch = 33024
 	// expected value of 33024 via spec commentary:
-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
+	// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
 	// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
 	//
 	// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
@@ -41,17 +41,17 @@ const (
 // CustodyGroups computes the custody groups the node should participate in for custody.
 // https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
 func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
-	numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
+	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

 	// Check if the custody group count is larger than the number of custody groups.
-	if custodyGroupCount > numberOfCustodyGroup {
+	if custodyGroupCount > numberOfCustodyGroups {
 		return nil, ErrCustodyGroupCountTooLarge
 	}

 	// Shortcut if all custody groups are needed.
-	if custodyGroupCount == numberOfCustodyGroup {
-		custodyGroups := make([]uint64, 0, numberOfCustodyGroup)
-		for i := range numberOfCustodyGroup {
+	if custodyGroupCount == numberOfCustodyGroups {
+		custodyGroups := make([]uint64, 0, numberOfCustodyGroups)
+		for i := range numberOfCustodyGroups {
 			custodyGroups = append(custodyGroups, i)
 		}

@@ -73,7 +73,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
 		hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

 		// Get the custody group ID.
-		custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
+		custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroups

 		// Add the custody group to the map.
 		if !custodyGroupsMap[custodyGroup] {

@@ -88,9 +88,6 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
 		// Increment the current ID.
 		currentId.Add(currentId, one)
 	}
-
-	// Sort the custody groups.
-	slices.Sort[[]uint64](custodyGroups)
 	}

 	// Final check.
@@ -98,6 +95,9 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
 		return nil, errWrongComputedCustodyGroupCount
 	}

+	// Sort the custody groups.
+	slices.Sort[[]uint64](custodyGroups)
+
 	return custodyGroups, nil
 }
@@ -105,19 +105,19 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
 // https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
 func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 	beaconConfig := params.BeaconConfig()
-	numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups

-	if custodyGroup >= numberOfCustodyGroup {
+	if custodyGroup >= numberOfCustodyGroups {
 		return nil, ErrCustodyGroupTooLarge
 	}

 	numberOfColumns := beaconConfig.NumberOfColumns

-	columnsPerGroup := numberOfColumns / numberOfCustodyGroup
+	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

 	columns := make([]uint64, 0, columnsPerGroup)
 	for i := range columnsPerGroup {
-		column := numberOfCustodyGroup*i + custodyGroup
+		column := numberOfCustodyGroups*i + custodyGroup
 		columns = append(columns, column)
 	}
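A quick sanity check of the column layout, under the assumption that the mainnet Fulu preset uses NUMBER_OF_COLUMNS = 128 and NUMBER_OF_CUSTODY_GROUPS = 128 (if the presets differ, the shape changes accordingly):

// With 128 columns and 128 custody groups, columnsPerGroup = 128/128 = 1,
// so group g custodies exactly column g:
//   ComputeColumnsForCustodyGroup(5) -> [5]
// With a hypothetical 128 columns and 64 groups, columnsPerGroup = 2 and the
// columns of group g are interleaved with stride 64:
//   ComputeColumnsForCustodyGroup(5) -> [5, 69]  // 64*0+5, 64*1+5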
@@ -127,7 +127,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 // DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
 // The returned value contains pointers to function parameters.
 // (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
+// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars_from_block
 func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
 	if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
 		return nil, nil

@@ -151,7 +151,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {

 	kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
 	if err != nil {
-		return nil, errors.Wrap(err, "merkle proof ZKG commitments")
+		return nil, errors.Wrap(err, "merkle proof KZG commitments")
 	}

 	dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)

@@ -219,6 +219,7 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
 // the KZG commitment inclusion proofs and cells and cell proofs.
 // The returned value contains pointers to function parameters.
 // (If the caller alters input parameters afterwards, the returned value will be modified as well.)
+// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars
 func dataColumnsSidecars(
 	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
 	blobKzgCommitments [][]byte,
@@ -17,8 +17,8 @@ func TestCustodyGroups(t *testing.T) {
 	// --------------------------------------------
 	// The happy path is unit tested in spec tests.
 	// --------------------------------------------
-	numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
-	_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1)
+	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
+	_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroups+1)
 	require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge)
 }

@@ -26,8 +26,8 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
 	// --------------------------------------------
 	// The happy path is unit tested in spec tests.
 	// --------------------------------------------
-	numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
-	_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup)
+	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
+	_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroups)
 	require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
 }
@@ -123,7 +123,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {

 // ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
 // cell proofs corresponding to the extended blobs. The main purpose of this function is to
-// construct data columns sidecars from data obtained from the execution client via:
+// construct data column sidecars from data obtained from the execution client via:
 // - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
 // - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
 // Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,

@@ -222,8 +222,8 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn
 	// Check if the data column sidecars are aligned with the block.
 	dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
 	for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
-		dataColumnSicecar := verifiedDataColumnSidecar.RODataColumn
-		dataColumnSidecars = append(dataColumnSidecars, dataColumnSicecar)
+		dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
+		dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
 	}

 	if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {

@@ -241,7 +241,7 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn
 		return blobSidecars, nil
 	}

-	// We need to reconstruct the blobs.
+	// We need to reconstruct the data column sidecars.
 	reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
 	if err != nil {
 		return nil, errors.Wrap(err, "reconstruct data column sidecars")
@@ -196,6 +196,26 @@ func TestReconstructBlobs(t *testing.T) {
 		require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
 	})

+	t.Run("consecutive duplicates", func(t *testing.T) {
+		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
+
+		// [0, 1, 1, 3, 4, ...]
+		verifiedRoSidecars[2] = verifiedRoSidecars[1]
+
+		_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
+		require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
+	})
+
+	t.Run("non-consecutive duplicates", func(t *testing.T) {
+		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
+
+		// [0, 1, 2, 1, 4, ...]
+		verifiedRoSidecars[3] = verifiedRoSidecars[1]
+
+		_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
+		require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
+	})
+
 	t.Run("not enough columns", func(t *testing.T) {
 		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
@@ -21,10 +21,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
 	}

 	beaconConfig := params.BeaconConfig()
-	numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
 	validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
 	balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup

 	count := totalNodeBalance / balancePerAdditionalCustodyGroup
-	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
+	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
 }
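To make the min/max clamping concrete, a worked example under assumed Fulu config values (VALIDATOR_CUSTODY_REQUIREMENT = 8, BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32 ETH, NUMBER_OF_CUSTODY_GROUPS = 128; consult params.BeaconConfig() for the authoritative numbers):

// count = totalNodeBalance / balancePerAdditionalCustodyGroup
// Node whose validators total 1,024 ETH: count = 1024/32 = 32
//   -> min(max(32, 8), 128) = 32 custody groups
// Node with 64 ETH: count = 2 -> min(max(2, 8), 128) = 8 (floor applies)
// Node with 8,192 ETH: count = 256 -> min(max(256, 8), 128) = 128 (cap applies)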
@@ -126,11 +126,11 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
 		return errors.Wrap(err, "entry filter")
 	}

-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
+	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
 	verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)

 	if err := verifier.ValidFields(); err != nil {
-		return errors.Wrap(err, "valid")
+		return errors.Wrap(err, "valid fields")
 	}

 	if err := verifier.SidecarInclusionProven(); err != nil {

@@ -164,7 +164,7 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
 	blockSlot := block.Block().Slot()
 	blockEpoch := slots.ToEpoch(blockSlot)

-	// Compute the current spoch.
+	// Compute the current epoch.
 	currentEpoch := slots.ToEpoch(currentSlot)

 	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
@@ -251,7 +251,7 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
 }

 // Save saves data column sidecars into the database and asynchronously performs pruning.
-// The returned chanel is closed when the pruning is complete.
+// The returned channel is closed when the pruning is complete.
 func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
 	startTime := time.Now()

@@ -266,8 +266,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
 		return errWrongNumberOfColumns
 	}

-	highestEpoch := primitives.Epoch(0)
-	dataColumnSidecarsbyRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
+	dataColumnSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)

 	// Group data column sidecars by root.
 	for _, dataColumnSidecar := range dataColumnSidecars {

@@ -278,23 +277,20 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {

 		// Group data column sidecars by root.
 		root := dataColumnSidecar.BlockRoot()
-		dataColumnSidecarsbyRoot[root] = append(dataColumnSidecarsbyRoot[root], dataColumnSidecar)
+		dataColumnSidecarsByRoot[root] = append(dataColumnSidecarsByRoot[root], dataColumnSidecar)
 	}

-	for root, dataColumnSidecars := range dataColumnSidecarsbyRoot {
+	for root, dataColumnSidecars := range dataColumnSidecarsByRoot {
 		// Safety check all data column sidecars for this root are from the same slot.
-		firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
+		slot := dataColumnSidecars[0].Slot()
 		for _, dataColumnSidecar := range dataColumnSidecars[1:] {
-			if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
+			if dataColumnSidecar.Slot() != slot {
 				return errDataColumnSidecarsFromDifferentSlots
 			}
 		}

-		// Set the highest epoch.
-		epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
-		highestEpoch = max(highestEpoch, epoch)
-
 		// Save data columns in the filesystem.
+		epoch := slots.ToEpoch(slot)
 		if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
 			return errors.Wrap(err, "save filesystem")
 		}

@@ -306,7 +302,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
 	}

 	// Compute the data columns ident.
-	dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(dataColumnSidecars[0].Slot()), Indices: indices}
+	dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: epoch, Indices: indices}

 	// Set data columns in the cache.
 	if err := dcs.cache.set(dataColumnsIdent); err != nil {
@@ -20,7 +20,7 @@ File organisation
 	The remaining 7 bits (from 0 to 127) represent the index of the data column.
 	This sentinel bit is needed to distinguish between the column with index 0 and no column.
 	Example: If the column with index 5 is in the 3rd position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
-	- The rest of the file is a repeat of the SSZ encoded data columns sidecars.
+	- The rest of the file is a repeat of the SSZ encoded data column sidecars.

|------------------------------------------|------------------------------------------------------------------------------------|
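A minimal sketch of decoding one such index byte, assuming only the sentinel-bit layout described above (this helper is illustrative and not code from the repository):

// decodeIndexEntry interprets one byte of the indices array described above.
// Bit 7 is the "present" sentinel; bits 0-6 are the position in the file.
func decodeIndexEntry(b byte) (position int, present bool) {
	if b&0x80 == 0 {
		return 0, false // column not stored in this file
	}
	return int(b & 0x7f), true // e.g. 0x83 -> position 3
}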
@@ -75,7 +75,7 @@ data-columns

 Computation of the maximum size of a DataColumnSidecar
 ------------------------------------------------------
-https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar
+https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar


 class DataColumnSidecar(Container):
@@ -699,6 +699,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
 		Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
 		RelayNodeAddr:        cliCtx.String(cmd.RelayNode.Name),
 		DataDir:              dataDir,
+		DiscoveryDir:         filepath.Join(dataDir, "discovery"),
 		LocalIP:              cliCtx.String(cmd.P2PIP.Name),
 		HostAddress:          cliCtx.String(cmd.P2PHost.Name),
 		HostDNS:              cliCtx.String(cmd.P2PHostDNS.Name),
@@ -104,6 +104,7 @@ go_library(
     "@com_github_libp2p_go_mplex//:go_default_library",
     "@com_github_multiformats_go_multiaddr//:go_default_library",
     "@com_github_multiformats_go_multiaddr//net:go_default_library",
+    "@com_github_patrickmn_go_cache//:go_default_library",
     "@com_github_pkg_errors//:go_default_library",
     "@com_github_prometheus_client_golang//prometheus:go_default_library",
     "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -27,6 +27,7 @@ type Config struct {
 	HostDNS      string
 	PrivateKey   string
 	DataDir      string
+	DiscoveryDir string
 	MetaDataDir  string
 	QUICPort     uint
 	TCPPort      uint
@@ -25,6 +25,7 @@ import (
 	ma "github.com/multiformats/go-multiaddr"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/go-bitfield"
+	"github.com/sirupsen/logrus"
 )

 type (
@@ -544,7 +545,7 @@ func (s *Service) createLocalNode(
 	ipAddr net.IP,
 	udpPort, tcpPort, quicPort int,
 ) (*enode.LocalNode, error) {
-	db, err := enode.OpenDB("")
+	db, err := enode.OpenDB(s.cfg.DiscoveryDir)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not open node's peer database")
 	}

@@ -603,7 +604,10 @@ func (s *Service) createLocalNode(
 			localNode.SetFallbackIP(firstIP)
 		}
 	}
-
+	log.WithFields(logrus.Fields{
+		"seq": localNode.Seq(),
+		"id":  localNode.ID(),
+	}).Debug("Local node created")
 	return localNode, nil
 }
@@ -619,7 +623,11 @@ func (s *Service) startDiscoveryV5(
 		return nil, errors.Wrap(err, "could not create listener")
 	}
 	record := wrappedListener.Self()
-	log.WithField("ENR", record.String()).Info("Started discovery v5")
+
+	log.WithFields(logrus.Fields{
+		"ENR": record.String(),
+		"seq": record.Seq(),
+	}).Info("Started discovery v5")
 	return wrappedListener, nil
 }
@@ -264,6 +264,7 @@ func TestRebootDiscoveryListener(t *testing.T) {
 		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
+		cfg:                   &Config{UDPPort: uint(port)},
 	}

 	createListener := func() (*discover.UDPv5, error) {
 		return s.createListener(ipAddr, pkey)
 	}

@@ -293,6 +294,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
 	s := &Service{
 		genesisTime:           time.Now(),
 		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
+		cfg:                   &Config{},
 	}
 	node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
 	require.NoError(t, err)
@@ -495,6 +497,35 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
 	assert.Equal(t, true, ipv6Found, "IPv6 discovery address not found")
 }

+func TestDiscoveryV5_SeqNumber(t *testing.T) {
+	db, err := enode.OpenDB(t.TempDir())
+	require.NoError(t, err)
+	_, key := createAddrAndPrivKey(t)
+	node := enode.NewLocalNode(db, key)
+	node.Set(enr.IPv4{127, 0, 0, 1})
+	currentSeq := node.Seq()
+	s := &Service{dv5Listener: mockListener{localNode: node}}
+	_, err = s.DiscoveryAddresses()
+	require.NoError(t, err)
+	newSeq := node.Seq()
+	require.Equal(t, currentSeq+1, newSeq) // node seq should increase when discovery starts
+
+	// see that changing the key changes the node seq
+	_, keyTwo := createAddrAndPrivKey(t)
+	nodeTwo := enode.NewLocalNode(db, keyTwo) // use the same db with a different key
+	nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
+	seqTwo := nodeTwo.Seq()
+	assert.NotEqual(t, seqTwo, newSeq)
+	sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
+	_, err = sTwo.DiscoveryAddresses()
+	require.NoError(t, err)
+	assert.Equal(t, seqTwo+1, nodeTwo.Seq())
+
+	// see that reloading the same node with the same key and db results in the same seq number
+	nodeThree := enode.NewLocalNode(db, key)
+	assert.Equal(t, node.Seq(), nodeThree.Seq())
+}
+
 func TestCorrectUDPVersion(t *testing.T) {
 	assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
 	assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
@@ -1,7 +1,7 @@
 /*
 Package p2p implements the Ethereum consensus networking specification.

-Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md
+Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md

 Prysm specific implementation design docs
   - Networking Design Doc: https://docs.google.com/document/d/1VyhobQRkEjEkEPxmmdWvaHfKWn0j6dEae_wLZlrFtfU/view
@@ -13,6 +13,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -79,8 +80,8 @@ func (s *Service) disconnectFromPeerOnError(
 // and validating the response from the peer.
 func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Context, id peer.ID) error) {
 	// Peer map and lock to keep track of current connection attempts.
-	var peerLock sync.Mutex
 	peerMap := make(map[peer.ID]bool)
+	peerLock := new(sync.Mutex)

 	// This is run at the start of each connection attempt, to ensure
 	// that there aren't multiple inflight connection requests for the

@@ -108,6 +109,19 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Context, id peer.ID) error) {
 	s.host.Network().Notify(&network.NotifyBundle{
 		ConnectedF: func(_ network.Network, conn network.Conn) {
 			remotePeer := conn.RemotePeer()
+			log := log.WithField("peer", remotePeer)
+			direction := conn.Stat().Direction
+
+			// For some reason, right after a disconnection, this `ConnectedF` callback
+			// is called. We want to avoid processing this connection if the peer was
+			// disconnected too recently and if we are at the initiative of this connection.
+			// This is very probably a bug in libp2p.
+			if direction == network.DirOutbound {
+				if err := s.wasDisconnectedTooRecently(remotePeer); err != nil {
+					log.WithError(err).Debug("Skipping connection handler")
+					return
+				}
+			}

 			// Connection handler must be non-blocking as part of libp2p design.
 			go func() {

@@ -133,53 +147,56 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Context, id peer.ID) error) {
 					return
 				}

-				// Do not perform handshake on inbound dials.
-				if conn.Stat().Direction == network.DirInbound {
-					_, err := s.peers.ChainState(remotePeer)
-					peerExists := err == nil
-					currentTime := prysmTime.Now()
-
-					// Wait for peer to initiate handshake
-					time.Sleep(timeForStatus)
-
-					// Exit if we are disconnected with the peer.
-					if s.host.Network().Connectedness(remotePeer) != network.Connected {
-						return
-					}
-
-					// If peer hasn't sent a status request, we disconnect with them
-					if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
-						statusMessageMissing.Inc()
-						s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
-						return
-					}
-
-					if peerExists {
-						updated, err := s.peers.ChainStateLastUpdated(remotePeer)
-						if err != nil {
-							s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
-							return
-						}
-
-						// Exit if we don't receive any current status messages from peer.
-						if updated.IsZero() {
-							s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
-							return
-						}
-
-						if updated.Before(currentTime) {
-							s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
-							return
-						}
-					}
-
-					s.connectToPeer(conn)
-					return
-				}
-
-				s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
-				if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
-					s.disconnectFromPeerOnError(conn, goodByeFunc, err)
+				if direction != network.DirInbound {
+					s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
+					if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
+						s.disconnectFromPeerOnError(conn, goodByeFunc, err)
+						return
+					}
+					return
+				}
+
+				// The connection is inbound.
+				_, err = s.peers.ChainState(remotePeer)
+				peerExists := err == nil
+				currentTime := prysmTime.Now()
+
+				// Wait for peer to initiate handshake
+				time.Sleep(timeForStatus)
+
+				// Exit if we are disconnected with the peer.
+				if s.host.Network().Connectedness(remotePeer) != network.Connected {
+					return
+				}
+
+				// If peer hasn't sent a status request, we disconnect with them
+				if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
+					statusMessageMissing.Inc()
+					s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
+					return
+				}
+
+				if !peerExists {
+					s.connectToPeer(conn)
+					return
+				}
+
+				updated, err := s.peers.ChainStateLastUpdated(remotePeer)
+				if err != nil {
+					s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
+					return
+				}
+
+				// Exit if we don't receive any current status messages from peer.
+				if updated.IsZero() {
+					s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
+					return
+				}
+
+				if updated.Before(currentTime) {
+					s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
+					return
+				}
@@ -220,6 +237,12 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
 			}

 			s.peers.SetConnectionState(peerID, peers.Disconnected)
+			if err := s.peerDisconnectionTime.Add(peerID.String(), time.Now(), cache.DefaultExpiration); err != nil {
+				// The `DisconnectedF` function was already called for this peer less than `cache.DefaultExpiration` ago. Skip.
+				// (Very probably a bug in libp2p.)
+				log.WithError(err).Trace("Failed to set peer disconnection time")
+				return
+			}

 			// Only log disconnections if we were fully connected.
 			if priorState == peers.Connected {
@@ -231,6 +254,28 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
 	})
 }

+// wasDisconnectedTooRecently checks if the peer was disconnected within the last second.
+func (s *Service) wasDisconnectedTooRecently(peerID peer.ID) error {
+	const disconnectionDurationThreshold = 1 * time.Second
+
+	peerDisconnectionTimeObj, ok := s.peerDisconnectionTime.Get(peerID.String())
+	if !ok {
+		return nil
+	}
+
+	peerDisconnectionTime, ok := peerDisconnectionTimeObj.(time.Time)
+	if !ok {
+		return errors.New("invalid peer disconnection time type")
+	}
+
+	timeSinceDisconnection := time.Since(peerDisconnectionTime)
+	if timeSinceDisconnection < disconnectionDurationThreshold {
+		return errors.Errorf("peer %s was disconnected too recently: %s", peerID, timeSinceDisconnection)
+	}
+
+	return nil
+}
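The dedupe logic above leans on go-cache's Add semantics, which only succeed when the key is absent or expired; a minimal standalone sketch of that behaviour (using only the public go-cache API, with the same TTLs as the service below):

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Same shape as the service's cache: 1s default TTL, 1m cleanup sweep.
	c := cache.New(1*time.Second, 1*time.Minute)

	// First Add succeeds; the second fails while the entry is still live,
	// which is how duplicate DisconnectedF callbacks get filtered out.
	fmt.Println(c.Add("peer-1", time.Now(), cache.DefaultExpiration)) // <nil>
	fmt.Println(c.Add("peer-1", time.Now(), cache.DefaultExpiration)) // item already exists
}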
 func agentString(pid peer.ID, hst host.Host) string {
 	rawVersion, storeErr := hst.Peerstore().Get(pid, agentVersionKey)
@@ -101,21 +101,24 @@ func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {

 // Increment increments the number of bad responses we have received from the given remote peer.
 // If peer doesn't exist this method is no-op.
-func (s *BadResponsesScorer) Increment(pid peer.ID) {
+func (s *BadResponsesScorer) Increment(pid peer.ID) int {
 	if pid == "" {
-		return
+		return 0
 	}

 	s.store.Lock()
 	defer s.store.Unlock()

 	peerData, ok := s.store.PeerData(pid)
-	if !ok {
-		s.store.SetPeerData(pid, &peerdata.PeerData{
-			BadResponses: 1,
-		})
-		return
+	if ok {
+		peerData.BadResponses++
+		return peerData.BadResponses
 	}
-	peerData.BadResponses++
+
+	const badResponses = 1
+	peerData = &peerdata.PeerData{BadResponses: badResponses}
+	s.store.SetPeerData(pid, peerData)
+	return badResponses
 }

 // IsBadPeer states if the peer is to be considered bad.
@@ -31,6 +31,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -86,6 +87,7 @@ type Service struct {
 	genesisTime           time.Time
 	genesisValidatorsRoot []byte
 	activeValidatorCount  uint64
+	peerDisconnectionTime *cache.Cache
 }

 // NewService initializes a new p2p service compatible with shared.Service interface. No
@@ -115,16 +117,17 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 	ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)

 	s := &Service{
-		ctx:          ctx,
-		cancel:       cancel,
-		cfg:          cfg,
-		addrFilter:   addrFilter,
-		ipLimiter:    ipLimiter,
-		privKey:      privKey,
-		metaData:     metaData,
-		isPreGenesis: true,
-		joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
-		subnetsLock:  make(map[uint64]*sync.RWMutex),
+		ctx:                   ctx,
+		cancel:                cancel,
+		cfg:                   cfg,
+		addrFilter:            addrFilter,
+		ipLimiter:             ipLimiter,
+		privKey:               privKey,
+		metaData:              metaData,
+		isPreGenesis:          true,
+		joinedTopics:          make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
+		subnetsLock:           make(map[uint64]*sync.RWMutex),
+		peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
 	}

 	ipAddr := prysmnetwork.IPAddr()
@@ -408,7 +411,10 @@ func (s *Service) pingPeersAndLogEnr() {
 	defer s.pingMethodLock.RUnlock()

 	localENR := s.dv5Listener.Self()
-	log.WithField("ENR", localENR).Info("New node record")
+	log.WithFields(logrus.Fields{
+		"ENR": localENR,
+		"seq": localENR.Seq(),
+	}).Info("New node record")

 	if s.pingMethod == nil {
 		return
@@ -486,14 +492,17 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error {
 	if info.ID == s.host.ID() {
 		return nil
 	}

 	if err := s.Peers().IsBad(info.ID); err != nil {
-		return errors.Wrap(err, "refused to connect to bad peer")
+		return errors.Wrap(err, "bad peer")
 	}

 	ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
 	defer cancel()

 	if err := s.host.Connect(ctx, info); err != nil {
-		s.Peers().Scorers().BadResponsesScorer().Increment(info.ID)
-		return err
+		s.downscorePeer(info.ID, "connectionError")
+		return errors.Wrap(err, "peer connect")
 	}
 	return nil
 }
@@ -524,3 +533,8 @@ func (s *Service) connectToBootnodes() error {
 func (s *Service) isInitialized() bool {
 	return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32
 }
+
+func (s *Service) downscorePeer(peerID peer.ID, reason string) {
+	newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
+	log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
+}
@@ -403,7 +403,7 @@ func TestService_connectWithPeer(t *testing.T) {
 			return ps
 		}(),
 		info: peer.AddrInfo{ID: "bad"},
-		wantErr: "refused to connect to bad peer",
+		wantErr: "bad peer",
 	},
 }
 for _, tt := range tests {
@@ -181,6 +181,11 @@ func (s *Service) findPeersWithSubnets(
 		// Get all needed subnets that the node is subscribed to.
 		// Skip nodes that are not subscribed to any of the defective subnets.
 		node := iterator.Node()
+
+		if !s.filterPeer(node) {
+			continue
+		}
+
 		nodeSubnets, err := filter(node)
 		if err != nil {
 			return nil, errors.Wrap(err, "filter node")
@@ -985,6 +985,16 @@ func (s *Service) beaconEndpoints(
 		{
 			handler: server.GetPendingPartialWithdrawals,
 			methods: []string{http.MethodGet},
 		},
+		{
+			template: "/eth/v1/beacon/states/{state_id}/proposer_lookahead",
+			name:     namespace + ".GetProposerLookahead",
+			middleware: []middleware.Middleware{
+				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
+				middleware.AcceptEncodingHeaderHandler(),
+			},
+			handler: server.GetProposerLookahead,
+			methods: []string{http.MethodGet},
+		},
 	}
 }
@@ -32,6 +32,7 @@ func Test_endpoints(t *testing.T) {
 	"/eth/v1/beacon/states/{state_id}/pending_deposits":            {http.MethodGet},
 	"/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals": {http.MethodGet},
 	"/eth/v1/beacon/states/{state_id}/pending_consolidations":      {http.MethodGet},
+	"/eth/v1/beacon/states/{state_id}/proposer_lookahead":          {http.MethodGet},
 	"/eth/v1/beacon/headers":                                       {http.MethodGet},
 	"/eth/v1/beacon/headers/{block_id}":                            {http.MethodGet},
 	"/eth/v1/beacon/blinded_blocks":                                {http.MethodPost},
@@ -8,6 +8,7 @@ go_library(
     "handlers_state.go",
     "handlers_validator.go",
     "log.go",
+    "metrics.go",
     "server.go",
 ],
 importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/beacon",

@@ -61,6 +62,8 @@ go_library(
     "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
     "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
     "@com_github_pkg_errors//:go_default_library",
+    "@com_github_prometheus_client_golang//prometheus:go_default_library",
+    "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
     "@com_github_prysmaticlabs_fastssz//:go_default_library",
     "@com_github_sirupsen_logrus//:go_default_library",
 ],

@@ -123,6 +126,7 @@ go_test(
     "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
     "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
     "@com_github_pkg_errors//:go_default_library",
+    "@com_github_prysmaticlabs_fastssz//:go_default_library",
     "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
     "@com_github_sirupsen_logrus//hooks/test:go_default_library",
     "@com_github_stretchr_testify//mock:go_default_library",
@@ -9,6 +9,7 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
+	"time"

 	"github.com/OffchainLabs/prysm/v6/api"
 	"github.com/OffchainLabs/prysm/v6/api/server/structs"

@@ -658,6 +659,12 @@ func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
 // broadcast all given signed blobs. The broadcast behaviour may be adjusted via the
 // `broadcast_validation` query parameter.
 func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
+	start := time.Now()
+	defer func() {
+		duration := time.Since(start).Milliseconds()
+		publishBlockV2Duration.Observe(float64(duration))
+	}()
+
 	ctx, span := trace.StartSpan(r.Context(), "beacon.PublishBlockV2")
 	defer span.End()
 	if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
@@ -1790,6 +1797,63 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Request) {
 	}
 }

+func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
+	ctx, span := trace.StartSpan(r.Context(), "beacon.GetProposerLookahead")
+	defer span.End()
+
+	stateId := r.PathValue("state_id")
+	if stateId == "" {
+		httputil.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
+		return
+	}
+	st, err := s.Stater.State(ctx, []byte(stateId))
+	if err != nil {
+		shared.WriteStateFetchError(w, err)
+		return
+	}
+	if st.Version() < version.Fulu {
+		httputil.HandleError(w, "state_id is prior to fulu", http.StatusBadRequest)
+		return
+	}
+	pl, err := st.ProposerLookahead()
+	if err != nil {
+		httputil.HandleError(w, "Could not get proposer lookahead: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.Header().Set(api.VersionHeader, version.String(st.Version()))
+	if httputil.RespondWithSsz(r) {
+		sszLen := (*primitives.ValidatorIndex)(nil).SizeSSZ()
+		sszData := make([]byte, len(pl)*sszLen)
+		for i, idx := range pl {
+			copy(sszData[i*sszLen:(i+1)*sszLen], ssz.MarshalUint64([]byte{}, uint64(idx)))
+		}
+		httputil.WriteSsz(w, sszData)
+	} else {
+		isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
+		if err != nil {
+			httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
+		if err != nil {
+			httputil.HandleError(w, "Could not calculate root of latest block header: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
+		vi := make([]string, len(pl))
+		for i, v := range pl {
+			vi[i] = strconv.FormatUint(uint64(v), 10)
+		}
+		resp := structs.GetProposerLookaheadResponse{
+			Version:             version.String(st.Version()),
+			ExecutionOptimistic: isOptimistic,
+			Finalized:           isFinalized,
+			Data:                vi,
+		}
+		httputil.WriteJson(w, resp)
+	}
+}
|
||||
|
||||
// SerializeItems serializes a slice of items, each of which implements the MarshalSSZ method,
|
||||
// into a single byte array.
|
||||
func serializeItems[T interface{ MarshalSSZ() ([]byte, error) }](items []T) ([]byte, error) {
|
||||
|
||||
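The SSZ branch above writes each ValidatorIndex as a fixed 8-byte little-endian uint64, so the octet-stream response is a plain concatenation of indices. A minimal client-side decoder sketch (the HTTP request itself is assumed, not shown):

package main

import (
    "encoding/binary"
    "fmt"
)

// decodeLookahead splits an SSZ proposer-lookahead payload into uint64
// validator indices. Each index occupies exactly 8 little-endian bytes.
func decodeLookahead(payload []byte) ([]uint64, error) {
    const sszLen = 8
    if len(payload)%sszLen != 0 {
        return nil, fmt.Errorf("payload length %d is not a multiple of %d", len(payload), sszLen)
    }
    out := make([]uint64, 0, len(payload)/sszLen)
    for i := 0; i < len(payload); i += sszLen {
        out = append(out, binary.LittleEndian.Uint64(payload[i:i+sszLen]))
    }
    return out, nil
}

func main() {
    idx, _ := decodeLookahead([]byte{7, 0, 0, 0, 0, 0, 0, 0})
    fmt.Println(idx) // [7]
}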
@@ -43,6 +43,7 @@ import (
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/go-bitfield"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "github.com/stretchr/testify/mock"
@@ -5363,3 +5364,190 @@ func TestGetPendingPartialWithdrawals(t *testing.T) {
        require.Equal(t, true, resp.Finalized)
    })
}

func TestGetProposerLookahead(t *testing.T) {
    numValidators := 50
    // Create a Fulu state with proposer lookahead data
    st, _ := util.DeterministicGenesisStateFulu(t, uint64(numValidators))
    lookaheadSize := int(params.BeaconConfig().MinSeedLookahead+1) * int(params.BeaconConfig().SlotsPerEpoch)
    lookahead := make([]primitives.ValidatorIndex, lookaheadSize)
    for i := 0; i < lookaheadSize; i++ {
        lookahead[i] = primitives.ValidatorIndex(i % numValidators) // Cycle through validators
    }

    require.NoError(t, st.SetProposerLookahead(lookahead))

    chainService := &chainMock.ChainService{
        Optimistic:     false,
        FinalizedRoots: map[[32]byte]bool{},
    }
    server := &Server{
        Stater: &testutil.MockStater{
            BeaconState: st,
        },
        OptimisticModeFetcher: chainService,
        FinalizationFetcher:   chainService,
    }

    t.Run("json response", func(t *testing.T) {
        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        req.SetPathValue("state_id", "head")
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        server.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusOK, rec.Code)
        require.Equal(t, "fulu", rec.Header().Get(api.VersionHeader))

        var resp structs.GetProposerLookaheadResponse
        require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))

        expectedVersion := version.String(st.Version())
        require.Equal(t, expectedVersion, resp.Version)
        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)

        // Verify the data
        require.Equal(t, lookaheadSize, len(resp.Data))
        for i := 0; i < lookaheadSize; i++ {
            expectedIdx := strconv.FormatUint(uint64(i%numValidators), 10)
            require.Equal(t, expectedIdx, resp.Data[i])
        }
    })

    t.Run("ssz response", func(t *testing.T) {
        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        req.Header.Set("Accept", "application/octet-stream")
        req.SetPathValue("state_id", "head")
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        server.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusOK, rec.Code)
        require.Equal(t, "fulu", rec.Header().Get(api.VersionHeader))
        responseBytes := rec.Body.Bytes()
        validatorIndexSize := (*primitives.ValidatorIndex)(nil).SizeSSZ()
        require.Equal(t, len(responseBytes), validatorIndexSize*lookaheadSize)

        recoveredIndices := make([]primitives.ValidatorIndex, lookaheadSize)
        for i := 0; i < lookaheadSize; i++ {
            start := i * validatorIndexSize
            end := start + validatorIndexSize

            idx := ssz.UnmarshallUint64(responseBytes[start:end])
            recoveredIndices[i] = primitives.ValidatorIndex(idx)
        }
        require.DeepEqual(t, lookahead, recoveredIndices)
    })

    t.Run("pre fulu state", func(t *testing.T) {
        preEplusSt, _ := util.DeterministicGenesisStateElectra(t, 1)
        preFuluServer := &Server{
            Stater: &testutil.MockStater{
                BeaconState: preEplusSt,
            },
            OptimisticModeFetcher: chainService,
            FinalizationFetcher:   chainService,
        }

        // Test JSON request
        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        req.SetPathValue("state_id", "head")
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        preFuluServer.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusBadRequest, rec.Code)

        var errResp struct {
            Code    int    `json:"code"`
            Message string `json:"message"`
        }
        require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &errResp))
        require.Equal(t, "state_id is prior to fulu", errResp.Message)

        // Test SSZ request
        sszReq := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        sszReq.Header.Set("Accept", "application/octet-stream")
        sszReq.SetPathValue("state_id", "head")
        sszRec := httptest.NewRecorder()
        sszRec.Body = new(bytes.Buffer)

        preFuluServer.GetProposerLookahead(sszRec, sszReq)
        require.Equal(t, http.StatusBadRequest, sszRec.Code)

        var sszErrResp struct {
            Code    int    `json:"code"`
            Message string `json:"message"`
        }
        require.NoError(t, json.Unmarshal(sszRec.Body.Bytes(), &sszErrResp))
        require.Equal(t, "state_id is prior to fulu", sszErrResp.Message)
    })

    t.Run("missing state_id parameter", func(t *testing.T) {
        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        server.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusBadRequest, rec.Code)

        var errResp struct {
            Code    int    `json:"code"`
            Message string `json:"message"`
        }
        require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &errResp))
        require.Equal(t, "state_id is required in URL params", errResp.Message)
    })

    t.Run("optimistic node", func(t *testing.T) {
        optimisticChainService := &chainMock.ChainService{
            Optimistic:     true,
            FinalizedRoots: map[[32]byte]bool{},
        }
        optimisticServer := &Server{
            Stater:                server.Stater,
            OptimisticModeFetcher: optimisticChainService,
            FinalizationFetcher:   optimisticChainService,
        }

        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        req.SetPathValue("state_id", "head")
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        optimisticServer.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusOK, rec.Code)

        var resp structs.GetProposerLookaheadResponse
        require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
        require.Equal(t, true, resp.ExecutionOptimistic)
    })

    t.Run("finalized node", func(t *testing.T) {
        blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
        require.NoError(t, err)

        finalizedChainService := &chainMock.ChainService{
            Optimistic:     false,
            FinalizedRoots: map[[32]byte]bool{blockRoot: true},
        }
        finalizedServer := &Server{
            Stater:                server.Stater,
            OptimisticModeFetcher: finalizedChainService,
            FinalizationFetcher:   finalizedChainService,
        }

        req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
        req.SetPathValue("state_id", "head")
        rec := httptest.NewRecorder()
        rec.Body = new(bytes.Buffer)

        finalizedServer.GetProposerLookahead(rec, req)
        require.Equal(t, http.StatusOK, rec.Code)

        var resp structs.GetProposerLookaheadResponse
        require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
        require.Equal(t, true, resp.Finalized)
    })
}

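For context on the sizes asserted above: with the usual mainnet parameters (MIN_SEED_LOOKAHEAD = 1, SLOTS_PER_EPOCH = 32, quoted here from memory, so treat them as an assumption), the lookahead spans (1+1)*32 = 64 slots, i.e. the current epoch plus the next. A tiny sketch:

// Illustrative mainnet numbers; real code should read params.BeaconConfig().
const (
    minSeedLookahead = 1  // epochs
    slotsPerEpoch    = 32 // slots
)

var lookaheadSlots = (minSeedLookahead + 1) * slotsPerEpoch // 64 proposer indices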
16  beacon-chain/rpc/eth/beacon/metrics.go  (new file)
@@ -0,0 +1,16 @@
package beacon

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    publishBlockV2Duration = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name:    "publish_block_v2_duration_milliseconds",
            Help:    "Duration of publishBlockV2 endpoint processing in milliseconds",
            Buckets: []float64{1, 5, 20, 100, 500, 1000, 2000, 5000},
        },
    )
)
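A hedged sketch of exercising the new histogram in a test: testutil.CollectAndCount reports how many metric series a collector exposes, so one observed sample on a single histogram should yield a count of 1. This test is illustrative and not part of the diff:

package beacon

import (
    "testing"

    "github.com/prometheus/client_golang/prometheus/testutil"
)

func TestPublishBlockV2DurationRegisters(t *testing.T) {
    publishBlockV2Duration.Observe(42) // record one sample
    // A single histogram collector exposes exactly one metric series.
    if got := testutil.CollectAndCount(publishBlockV2Duration); got != 1 {
        t.Fatalf("expected 1 collected metric, got %d", got)
    }
}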
@@ -12,6 +12,7 @@ go_library(
        "//network/forks:go_default_library",
        "//network/httputil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -2,6 +2,7 @@ package config

import (
    "fmt"
    "math"
    "net/http"
    "reflect"
    "strconv"
@@ -13,6 +14,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/network/forks"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    "github.com/ethereum/go-ethereum/common/hexutil"
    log "github.com/sirupsen/logrus"
)

// GetDepositContract retrieves deposit contract address and genesis fork version.
@@ -80,39 +82,108 @@ func GetSpec(w http.ResponseWriter, r *http.Request) {
    httputil.WriteJson(w, &structs.GetSpecResponse{Data: data})
}

func prepareConfigSpec() (map[string]string, error) {
    data := make(map[string]string)
func convertValueForJSON(v reflect.Value, tag string) interface{} {
    // Unwrap pointers / interfaces
    for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
        if v.IsNil() {
            return nil
        }
        v = v.Elem()
    }

    switch v.Kind() {
    // ===== Single byte → 0xAB =====
    case reflect.Uint8:
        return hexutil.Encode([]byte{uint8(v.Uint())})

    // ===== Other unsigned numbers → "123" =====
    case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
        return strconv.FormatUint(v.Uint(), 10)

    // ===== Signed numbers → "123" =====
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return strconv.FormatInt(v.Int(), 10)

    // ===== Raw bytes – encode to hex =====
    case reflect.Slice:
        if v.Type().Elem().Kind() == reflect.Uint8 {
            return hexutil.Encode(v.Bytes())
        }
        fallthrough
    case reflect.Array:
        if v.Type().Elem().Kind() == reflect.Uint8 {
            // Need a copy because v.Slice is illegal on arrays directly
            tmp := make([]byte, v.Len())
            reflect.Copy(reflect.ValueOf(tmp), v)
            return hexutil.Encode(tmp)
        }
        // Generic slice/array handling
        n := v.Len()
        out := make([]interface{}, n)
        for i := 0; i < n; i++ {
            out[i] = convertValueForJSON(v.Index(i), tag)
        }
        return out

    // ===== Struct =====
    case reflect.Struct:
        t := v.Type()
        m := make(map[string]interface{}, v.NumField())
        for i := 0; i < v.NumField(); i++ {
            f := t.Field(i)
            if !v.Field(i).CanInterface() {
                continue // unexported
            }
            key := f.Tag.Get("json")
            if key == "" || key == "-" {
                key = f.Name
            }
            m[key] = convertValueForJSON(v.Field(i), tag)
        }
        return m

    // ===== Default =====
    default:
        log.WithFields(log.Fields{
            "fn":   "prepareConfigSpec",
            "tag":  tag,
            "kind": v.Kind().String(),
            "type": v.Type().String(),
        }).Error("Unsupported config field kind; value forwarded verbatim")
        return v.Interface()
    }
}

func prepareConfigSpec() (map[string]interface{}, error) {
    data := make(map[string]interface{})
    config := *params.BeaconConfig()

    t := reflect.TypeOf(config)
    v := reflect.ValueOf(config)

    for i := 0; i < t.NumField(); i++ {
        tField := t.Field(i)
        _, isSpecField := tField.Tag.Lookup("spec")
        if !isSpecField {
        // Field should not be returned from API.
        _, isSpec := tField.Tag.Lookup("spec")
        if !isSpec {
            continue
        }
        if shouldSkip(tField) {
            continue
        }

        tagValue := strings.ToUpper(tField.Tag.Get("yaml"))
        vField := v.Field(i)
        switch vField.Kind() {
        case reflect.Int:
            data[tagValue] = strconv.FormatInt(vField.Int(), 10)
        case reflect.Uint64:
            data[tagValue] = strconv.FormatUint(vField.Uint(), 10)
        case reflect.Slice:
            data[tagValue] = hexutil.Encode(vField.Bytes())
        case reflect.Array:
            data[tagValue] = hexutil.Encode(reflect.ValueOf(&config).Elem().Field(i).Slice(0, vField.Len()).Bytes())
        case reflect.String:
            data[tagValue] = vField.String()
        case reflect.Uint8:
            data[tagValue] = hexutil.Encode([]byte{uint8(vField.Uint())})
        default:
            return nil, fmt.Errorf("unsupported config field type: %s", vField.Kind().String())
        }
        tag := strings.ToUpper(tField.Tag.Get("yaml"))
        val := v.Field(i)
        data[tag] = convertValueForJSON(val, tag)
    }

    return data, nil
}

func shouldSkip(tField reflect.StructField) bool {
    // Dynamically skip blob schedule if Fulu is not yet scheduled.
    if params.BeaconConfig().FuluForkEpoch == math.MaxUint64 &&
        tField.Type == reflect.TypeOf(params.BeaconConfig().BlobSchedule) {
        return true
    }
    return false
}

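To make the conversion rules concrete, a toy sketch of the same conventions. This does not call the unexported Prysm helper; it just restates the mapping, with expected outputs in comments:

package main

import (
    "fmt"
    "strconv"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    // Mirrors the rules in convertValueForJSON (illustrative only):
    fmt.Println(strconv.FormatUint(12345, 10))      // uint64      -> "12345"
    fmt.Println(hexutil.Encode([]byte{0xab}))       // single byte -> "0xab"
    fmt.Println(hexutil.Encode([]byte{0xde, 0xad})) // byte slice  -> "0xdead"
    // A struct becomes a map keyed by its json tags, e.g.
    // {Epoch: 100, MaxBlobsPerBlock: 6} -> {"EPOCH":"100","MAX_BLOBS_PER_BLOCK":"6"}
}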
@@ -5,6 +5,7 @@ import (
    "encoding/hex"
    "encoding/json"
    "fmt"
    "math"
    "net/http"
    "net/http/httptest"
    "testing"
@@ -200,7 +201,7 @@ func TestGetSpec(t *testing.T) {
    data, ok := resp.Data.(map[string]interface{})
    require.Equal(t, true, ok)

    assert.Equal(t, 175, len(data))
    assert.Equal(t, 176, len(data))
    for k, v := range data {
        t.Run(k, func(t *testing.T) {
            switch k {
@@ -577,6 +578,11 @@ func TestGetSpec(t *testing.T) {
                assert.Equal(t, "102", v)
            case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
                assert.Equal(t, "103", v)
            case "BLOB_SCHEDULE":
                // BLOB_SCHEDULE should be an empty slice when no schedule is defined
                blobSchedule, ok := v.([]interface{})
                assert.Equal(t, true, ok)
                assert.Equal(t, 0, len(blobSchedule))
            default:
                t.Errorf("Incorrect key: %s", k)
            }
@@ -637,3 +643,86 @@ func TestForkSchedule_Ok(t *testing.T) {
        assert.Equal(t, os.Len(), len(resp.Data))
    })
}

func TestGetSpec_BlobSchedule(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig().Copy()
    config.FuluForkEpoch = 1

    // Set up a blob schedule with test data
    config.BlobSchedule = []params.BlobScheduleEntry{
        {
            Epoch:            primitives.Epoch(100),
            MaxBlobsPerBlock: 6,
        },
        {
            Epoch:            primitives.Epoch(200),
            MaxBlobsPerBlock: 9,
        },
    }
    params.OverrideBeaconConfig(config)

    request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/spec", nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}

    GetSpec(writer, request)
    require.Equal(t, http.StatusOK, writer.Code)
    resp := structs.GetSpecResponse{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
    data, ok := resp.Data.(map[string]interface{})
    require.Equal(t, true, ok)

    // Verify BLOB_SCHEDULE is present and properly formatted
    blobScheduleValue, exists := data["BLOB_SCHEDULE"]
    require.Equal(t, true, exists)

    // Verify it's a slice of maps (an actual JSON array of objects, not a string).
    // JSON unmarshaling yields []interface{} with map[string]interface{} entries.
    blobScheduleSlice, ok := blobScheduleValue.([]interface{})
    require.Equal(t, true, ok)

    // Convert to a concrete type for easier testing.
    var blobSchedule []map[string]interface{}
    for _, entry := range blobScheduleSlice {
        entryMap, ok := entry.(map[string]interface{})
        require.Equal(t, true, ok)
        blobSchedule = append(blobSchedule, entryMap)
    }

    // Verify the blob schedule content
    require.Equal(t, 2, len(blobSchedule))

    // Check first entry - values should be strings for consistent API output
    assert.Equal(t, "100", blobSchedule[0]["EPOCH"])
    assert.Equal(t, "6", blobSchedule[0]["MAX_BLOBS_PER_BLOCK"])

    // Check second entry - values should be strings for consistent API output
    assert.Equal(t, "200", blobSchedule[1]["EPOCH"])
    assert.Equal(t, "9", blobSchedule[1]["MAX_BLOBS_PER_BLOCK"])
}

func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig().Copy()
    // Fulu not scheduled (default: math.MaxUint64)
    config.FuluForkEpoch = math.MaxUint64
    config.BlobSchedule = []params.BlobScheduleEntry{
        {Epoch: primitives.Epoch(100), MaxBlobsPerBlock: 6},
    }
    params.OverrideBeaconConfig(config)

    request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/spec", nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}

    GetSpec(writer, request)
    require.Equal(t, http.StatusOK, writer.Code)
    resp := structs.GetSpecResponse{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
    data, ok := resp.Data.(map[string]interface{})
    require.Equal(t, true, ok)

    _, exists := data["BLOB_SCHEDULE"]
    require.Equal(t, false, exists)
}

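The assertions above imply the following wire shape for BLOB_SCHEDULE: an array of objects with stringified numeric values. An illustrative fragment matching the test data:

// Illustrative only; mirrors the test expectations above.
var blobScheduleJSON = `[
  {"EPOCH": "100", "MAX_BLOBS_PER_BLOCK": "6"},
  {"EPOCH": "200", "MAX_BLOBS_PER_BLOCK": "9"}
]`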
@@ -19,12 +19,14 @@ go_library(
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//config/params:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/httputil:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/migration:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -47,6 +49,7 @@ go_test(
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/rpc/testutil:go_default_library",
        "//beacon-chain/sync/initial-sync/testing:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
        "//network/httputil:go_default_library",

@@ -9,10 +9,12 @@ import (
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/common/hexutil"
)

@@ -75,17 +77,25 @@ func (s *Server) GetIdentity(w http.ResponseWriter, r *http.Request) {
        httputil.HandleError(w, "Could not obtain enr: "+err.Error(), http.StatusInternalServerError)
        return
    }

    currentEpoch := slots.ToEpoch(s.GenesisTimeFetcher.CurrentSlot())
    metadata := s.MetadataProvider.Metadata()
    md := &structs.Metadata{
        SeqNumber: strconv.FormatUint(s.MetadataProvider.MetadataSeq(), 10),
        Attnets:   hexutil.Encode(metadata.AttnetsBitfield()),
    }
    if currentEpoch >= params.BeaconConfig().AltairForkEpoch {
        md.Syncnets = hexutil.Encode(metadata.SyncnetsBitfield())
    }
    if currentEpoch >= params.BeaconConfig().FuluForkEpoch {
        md.Cgc = strconv.FormatUint(metadata.CustodyGroupCount(), 10)
    }
    resp := &structs.GetIdentityResponse{
        Data: &structs.Identity{
            PeerId:             peerId,
            Enr:                "enr:" + serializedEnr,
            P2PAddresses:       p2pAddresses,
            DiscoveryAddresses: discoveryAddresses,
            Metadata: &structs.Metadata{
                SeqNumber: strconv.FormatUint(s.MetadataProvider.MetadataSeq(), 10),
                Attnets:   hexutil.Encode(s.MetadataProvider.Metadata().AttnetsBitfield()),
            },
            Metadata: md,
        },
    }
    httputil.WriteJson(w, resp)

@@ -15,6 +15,7 @@ import (
    mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
    syncmock "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
@@ -144,7 +145,14 @@ func TestGetIdentity(t *testing.T) {
    require.NoError(t, err)
    attnets := bitfield.NewBitvector64()
    attnets.SetBitAt(1, true)
    metadataProvider := &mockp2p.MockMetadataProvider{Data: wrapper.WrappedMetadataV0(&pb.MetaDataV0{SeqNumber: 1, Attnets: attnets})}
    syncnets := bitfield.NewBitvector4()
    syncnets.SetBitAt(1, true)
    metadataProvider := &mockp2p.MockMetadataProvider{Data: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        SeqNumber:         1,
        Attnets:           attnets,
        Syncnets:          syncnets,
        CustodyGroupCount: 2,
    })}

    t.Run("OK", func(t *testing.T) {
        peerManager := &mockp2p.MockPeerManager{
@@ -154,8 +162,9 @@ func TestGetIdentity(t *testing.T) {
            DiscoveryAddr: []ma.Multiaddr{discAddr1, discAddr2},
        }
        s := &Server{
            PeerManager:      peerManager,
            MetadataProvider: metadataProvider,
            PeerManager:        peerManager,
            MetadataProvider:   metadataProvider,
            GenesisTimeFetcher: &mock.ChainService{},
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/identity", nil)
@@ -187,6 +196,33 @@ func TestGetIdentity(t *testing.T) {
        assert.Equal(t, discAddr1.String(), resp.Data.DiscoveryAddresses[0])
        assert.Equal(t, discAddr2.String(), resp.Data.DiscoveryAddresses[1])
    })
    t.Run("OK Fulu", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        cfg := params.BeaconConfig()
        cfg.FuluForkEpoch = 0
        params.OverrideBeaconConfig(cfg)
        peerManager := &mockp2p.MockPeerManager{
            Enr:           enrRecord,
            PID:           "foo",
            BHost:         &mockp2p.MockHost{Addresses: []ma.Multiaddr{p2pAddr}},
            DiscoveryAddr: []ma.Multiaddr{discAddr1, discAddr2},
        }
        s := &Server{
            PeerManager:        peerManager,
            MetadataProvider:   metadataProvider,
            GenesisTimeFetcher: &mock.ChainService{},
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/identity", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.GetIdentity(writer, request)
        require.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetIdentityResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, "2", resp.Data.Metadata.Cgc)
    })

    t.Run("ENR failure", func(t *testing.T) {
        peerManager := &mockp2p.MockPeerManager{

@@ -118,8 +118,9 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
    consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
    if httpError != nil {
        log.WithError(httpError).Debug("Failed to get consensus block value")
        // Having the consensus block value is not critical to block production
        consensusBlockValue = ""
        // Having the consensus block value is not critical to block production.
        // We set it to zero to satisfy the specification, which requires a numeric value.
        consensusBlockValue = "0"
    }

    w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))

@@ -15,6 +15,7 @@ import (
    rewardtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/rewards/testing"
    rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
    mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    mock2 "github.com/OffchainLabs/prysm/v6/testing/mock"
@@ -408,6 +409,82 @@ func TestProduceBlockV3(t *testing.T) {
        require.Equal(t, "electra", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "10000000000", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
    t.Run("Fulu", func(t *testing.T) {
        var block *structs.SignedBeaconBlockContentsFulu
        err := json.Unmarshal([]byte(rpctesting.FuluBlockContents), &block)
        require.NoError(t, err)
        jsonBytes, err := json.Marshal(block.ToUnsigned())
        require.NoError(t, err)
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), &eth.BlockRequest{
            Slot:         1,
            RandaoReveal: bRandao,
            Graffiti:     bGraffiti,
            SkipMevBoost: false,
        }).Return(
            func() (*eth.GenericBeaconBlock, error) {
                b, err := block.ToUnsigned().ToGeneric()
                require.NoError(t, err)
                b.PayloadValue = "2000"
                return b, nil
            }())
        server := &Server{
            V1Alpha1Server:        v1alpha1Server,
            SyncChecker:           syncChecker,
            OptimisticModeFetcher: chainService,
            BlockRewardFetcher:    rewardFetcher,
        }
        request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s&graffiti=%s", randao, graffiti), nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.ProduceBlockV3(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        want := fmt.Sprintf(`{"version":"fulu","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10000000000","data":%s}`, string(jsonBytes))
        body := strings.ReplaceAll(writer.Body.String(), "\n", "")
        require.Equal(t, want, body)
        require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
        require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
        require.Equal(t, "fulu", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "10000000000", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
    t.Run("Blinded Fulu", func(t *testing.T) {
        var block *structs.SignedBlindedBeaconBlockFulu
        err := json.Unmarshal([]byte(rpctesting.BlindedFuluBlock), &block)
        require.NoError(t, err)
        jsonBytes, err := json.Marshal(block.Message)
        require.NoError(t, err)
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), &eth.BlockRequest{
            Slot:         1,
            RandaoReveal: bRandao,
            Graffiti:     bGraffiti,
            SkipMevBoost: false,
        }).Return(
            func() (*eth.GenericBeaconBlock, error) {
                b, err := block.Message.ToGeneric()
                require.NoError(t, err)
                b.PayloadValue = "2000"
                return b, nil
            }())
        server := &Server{
            V1Alpha1Server:        v1alpha1Server,
            SyncChecker:           syncChecker,
            OptimisticModeFetcher: chainService,
            BlockRewardFetcher:    rewardFetcher,
        }
        request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s&graffiti=%s", randao, graffiti), nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.ProduceBlockV3(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        want := fmt.Sprintf(`{"version":"fulu","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10000000000","data":%s}`, string(jsonBytes))
        body := strings.ReplaceAll(writer.Body.String(), "\n", "")
        require.Equal(t, want, body)
        require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
        require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
        require.Equal(t, "fulu", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "10000000000", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
    t.Run("invalid query parameter slot empty", func(t *testing.T) {
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        server := &Server{
@@ -461,6 +538,46 @@ func TestProduceBlockV3(t *testing.T) {
        assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
        assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing and not serving request on that endpoint"))
    })
    t.Run("0 block value is returned on error", func(t *testing.T) {
        rewardFetcher := &rewardtesting.MockBlockRewardFetcher{Error: &httputil.DefaultJsonError{}}

        var block *structs.SignedBeaconBlockContentsFulu
        err := json.Unmarshal([]byte(rpctesting.FuluBlockContents), &block)
        require.NoError(t, err)
        jsonBytes, err := json.Marshal(block.ToUnsigned())
        require.NoError(t, err)
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), &eth.BlockRequest{
            Slot:         1,
            RandaoReveal: bRandao,
            Graffiti:     bGraffiti,
            SkipMevBoost: false,
        }).Return(
            func() (*eth.GenericBeaconBlock, error) {
                b, err := block.ToUnsigned().ToGeneric()
                require.NoError(t, err)
                b.PayloadValue = "2000"
                return b, nil
            }())
        server := &Server{
            V1Alpha1Server:        v1alpha1Server,
            SyncChecker:           syncChecker,
            OptimisticModeFetcher: chainService,
            BlockRewardFetcher:    rewardFetcher,
        }
        request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s&graffiti=%s", randao, graffiti), nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.ProduceBlockV3(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        want := fmt.Sprintf(`{"version":"fulu","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"0","data":%s}`, string(jsonBytes))
        body := strings.ReplaceAll(writer.Body.String(), "\n", "")
        require.Equal(t, want, body)
        require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
        require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
        require.Equal(t, "fulu", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "0", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
}

func TestProduceBlockV3SSZ(t *testing.T) {
@@ -960,4 +1077,47 @@ func TestProduceBlockV3SSZ(t *testing.T) {
        require.Equal(t, "fulu", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "10000000000", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
    t.Run("0 block value is returned on error", func(t *testing.T) {
        rewardFetcher := &rewardtesting.MockBlockRewardFetcher{Error: &httputil.DefaultJsonError{}}

        var block *structs.SignedBeaconBlockContentsFulu
        err := json.Unmarshal([]byte(rpctesting.FuluBlockContents), &block)
        require.NoError(t, err)
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), &eth.BlockRequest{
            Slot:         1,
            RandaoReveal: bRandao,
            Graffiti:     bGraffiti,
            SkipMevBoost: false,
        }).Return(
            func() (*eth.GenericBeaconBlock, error) {
                b, err := block.ToUnsigned().ToGeneric()
                require.NoError(t, err)
                b.PayloadValue = "2000"
                return b, nil
            }())
        server := &Server{
            V1Alpha1Server:        v1alpha1Server,
            SyncChecker:           syncChecker,
            OptimisticModeFetcher: chainService,
            BlockRewardFetcher:    rewardFetcher,
        }
        request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s&graffiti=%s", randao, graffiti), nil)
        request.Header.Set("Accept", api.OctetStreamMediaType)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.ProduceBlockV3(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        g, err := block.ToUnsigned().ToGeneric()
        require.NoError(t, err)
        bl, ok := g.Block.(*eth.GenericBeaconBlock_Fulu)
        require.Equal(t, true, ok)
        ssz, err := bl.Fulu.MarshalSSZ()
        require.NoError(t, err)
        require.Equal(t, string(ssz), writer.Body.String())
        require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
        require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
        require.Equal(t, "fulu", writer.Header().Get(api.VersionHeader))
        require.Equal(t, "0", writer.Header().Get(api.ConsensusBlockValueHeader))
    })
}

@@ -44,9 +44,9 @@ type StateNotFoundError struct {
}

// NewStateNotFoundError creates a new error instance.
func NewStateNotFoundError(stateRootsSize int) StateNotFoundError {
func NewStateNotFoundError(stateRootsSize int, stateRoot []byte) StateNotFoundError {
    return StateNotFoundError{
        message: fmt.Sprintf("state not found in the last %d state roots", stateRootsSize),
        message: fmt.Sprintf("state not found in the last %d state roots, looking for state root: %#x", stateRootsSize, stateRoot),
    }
}

@@ -221,7 +221,7 @@ func (p *BeaconDbStater) stateByRoot(ctx context.Context, stateRoot []byte) (sta
        }
    }

    stateNotFoundErr := NewStateNotFoundError(len(headState.StateRoots()))
    stateNotFoundErr := NewStateNotFoundError(len(headState.StateRoots()), stateRoot)
    return nil, &stateNotFoundErr
}

@@ -418,8 +418,9 @@ func TestGetStateRoot(t *testing.T) {
}

func TestNewStateNotFoundError(t *testing.T) {
    e := NewStateNotFoundError(100)
    assert.Equal(t, "state not found in the last 100 state roots", e.message)
    stateRoot := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20}
    e := NewStateNotFoundError(100, stateRoot)
    assert.Equal(t, "state not found in the last 100 state roots, looking for state root: 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20", e.message)
}
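Since the error is returned by pointer, callers can branch on it with errors.As. A self-contained toy mirroring the pattern (this does not use the Prysm type; the Error() method on StateNotFoundError is assumed, as it is not shown in this hunk):

package main

import (
    "errors"
    "fmt"
)

// toyNotFound mirrors the StateNotFoundError pattern: a concrete error type
// returned by pointer so callers can branch on it with errors.As.
type toyNotFound struct{ message string }

func (e *toyNotFound) Error() string { return e.message }

func lookup(root []byte) error {
    return &toyNotFound{message: fmt.Sprintf("state not found, looking for state root: %#x", root)}
}

func main() {
    var nf *toyNotFound
    if err := lookup([]byte{0x01}); errors.As(err, &nf) {
        fmt.Println("not found:", nf.Error()) // map to HTTP 404 rather than 500
    }
}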

func TestStateBySlot_FutureSlot(t *testing.T) {

@@ -315,7 +315,7 @@ func (bs *Server) ListIndexedAttestationsElectra(
// that it was included in a block. The attestation may have expired.
// Refer to the ethereum consensus specification for more details on how
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#attestations
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
    var atts []*ethpb.Attestation
    var err error

@@ -262,7 +262,7 @@ func (vs *Server) activationStatus(
// It cannot faithfully attest to the head block of the chain, since it has not fully verified that block.
//
// Spec:
// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md
// https://github.com/ethereum/consensus-specs/blob/master/sync/optimistic.md
func (vs *Server) optimisticStatus(ctx context.Context) error {
    if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
        return nil

@@ -17,6 +17,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

type Service struct {
@@ -211,7 +212,7 @@ func (s *Service) importBatches(ctx context.Context) {
        _, err := s.batchImporter(ctx, current, ib, s.store)
        if err != nil {
            log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import")
            s.downscore(ib)
            s.downscorePeer(ib.blockPid, "backfillBatchImportError")
            s.batchSeq.update(ib.withState(batchErrRetryable))
            // If a batch fails, the subsequent batches are no longer considered importable.
            break
@@ -336,10 +337,6 @@ func (s *Service) initBatches() error {
    return nil
}

func (s *Service) downscore(b batch) {
    s.p2p.Peers().Scorers().BadResponsesScorer().Increment(b.blockPid)
}

func (*Service) Stop() error {
    return nil
}
@@ -383,3 +380,8 @@ func (s *Service) WaitForCompletion() error {
        return nil
    }
}

func (s *Service) downscorePeer(peerID peer.ID, reason string) {
    newScore := s.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
    log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
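This downscorePeer shape, increment the BadResponsesScorer, capture the returned score, and log peer, reason, and new score, recurs across several services in this change. A minimal sketch of the shared pattern; the scorer interface and return type below are stand-ins, not Prysm's actual types:

package peerscore // illustrative package

import (
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/sirupsen/logrus"
)

// badResponsesScorer is a stand-in; the diff shows Increment returning the
// peer's new score, so we model that here (the concrete type is assumed).
type badResponsesScorer interface {
    Increment(pid peer.ID) int
}

var log = logrus.WithField("prefix", "example")

// downscore captures the shared shape: bump the score, then log it.
func downscore(s badResponsesScorer, pid peer.ID, reason string) {
    newScore := s.Increment(pid)
    log.WithFields(logrus.Fields{
        "peerID":   pid,
        "reason":   reason,
        "newScore": newScore,
    }).Debug("Downscore peer")
}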
@@ -21,9 +21,9 @@ const (
    broadcastMissingDataColumnsSlack = 2 * time.Second
)

// reconstructSaveBroadcastDataColumnSidecars reconstructs if possible and
// needed all data column sidecars. Then, it saves into the store missing
// sidecars. After a delay, it broadcasts in the background not seen via gossip
// reconstructSaveBroadcastDataColumnSidecars reconstructs, if possible,
// all data column sidecars. Then, it saves missing sidecars to the store.
// After a delay, it broadcasts in the background not seen via gossip
// (but reconstructed) sidecars.
func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
    ctx context.Context,
@@ -33,15 +33,15 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
) error {
    startTime := time.Now()

    // Lock to prevent concurrent reconstructions.
    s.reconstructionLock.Lock()
    defer s.reconstructionLock.Unlock()

    // Get the columns we store.
    storedDataColumns := s.cfg.dataColumnStorage.Summary(root)
    storedColumnsCount := storedDataColumns.Count()
    numberOfColumns := params.BeaconConfig().NumberOfColumns

    // Lock to prevent concurrent reconstructions.
    s.reconstructionLock.Lock()
    defer s.reconstructionLock.Unlock()

    // If reconstruction is not possible or if all columns are already stored, exit early.
    if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
        return nil
@@ -55,7 +55,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
        return errors.Wrap(err, "peer info")
    }

    // Load all the possible data columns sidecars, to minimize reconstruction time.
    // Load all the possible data column sidecars, to minimize reconstruction time.
    verifiedSidecars, err := s.cfg.dataColumnStorage.Get(root, nil)
    if err != nil {
        return errors.Wrap(err, "get data column sidecars")
@@ -76,7 +76,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
        }
    }

    // Save the data columns sidecars in the database.
    // Save the data column sidecars to the database.
    // Note: We do not call `receiveDataColumn`, because it will ignore
    // incoming data columns via gossip while we did not broadcast (yet) the reconstructed data columns.
    if err := s.cfg.dataColumnStorage.Save(toSaveSidecars); err != nil {
@@ -95,7 +95,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
        "reconstructionAndSaveDuration": time.Since(startTime),
    }).Debug("Data columns reconstructed and saved")

    // Update reconstruction metrics
    // Update reconstruction metrics.
    dataColumnReconstructionHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
    dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))

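The early-exit condition is easier to read with concrete numbers. The values below are illustrative assumptions (128 columns, with half sufficing to reconstruct); the real code reads NumberOfColumns from the beacon config and the threshold from peerdas.MinimumColumnsCountToReconstruct:

package main

import "fmt"

const (
    numberOfColumns  = 128
    minToReconstruct = 64 // assumption: half the extended columns suffice
)

func shouldReconstruct(stored uint64) bool {
    // Too few columns stored: reconstruction is impossible.
    // All columns stored: there is nothing left to reconstruct.
    return stored >= minToReconstruct && stored < numberOfColumns
}

func main() {
    fmt.Println(shouldReconstruct(40))  // false: below the threshold
    fmt.Println(shouldReconstruct(70))  // true: enough to reconstruct the rest
    fmt.Println(shouldReconstruct(128)) // false: already complete
}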
@@ -337,14 +337,15 @@ func (q *blocksQueue) onDataReceivedEvent(ctx context.Context) eventHandlerFn {
                }
            }
        }

        if errors.Is(response.err, beaconsync.ErrInvalidFetchedData) {
            // Peer returned invalid data, penalize.
            q.blocksFetcher.p2p.Peers().Scorers().BadResponsesScorer().Increment(response.blocksFrom)
            log.WithField("pid", response.blocksFrom).Debug("Peer is penalized for invalid blocks")
        } else if errors.Is(response.err, verification.ErrBlobInvalid) {
            q.blocksFetcher.p2p.Peers().Scorers().BadResponsesScorer().Increment(response.blobsFrom)
            log.WithField("pid", response.blobsFrom).Debug("Peer is penalized for invalid blob response")
            q.downscorePeer(response.blocksFrom, "invalidBlocks")
        }

        if errors.Is(response.err, verification.ErrBlobInvalid) {
            q.downscorePeer(response.blobsFrom, "invalidBlobs")
        }

        return m.state, response.err
    }
    m.fetched = *response
@@ -455,6 +456,11 @@ func (q *blocksQueue) onProcessSkippedEvent(ctx context.Context) eventHandlerFn
    }
}

func (q *blocksQueue) downscorePeer(peerID peer.ID, reason string) {
    newScore := q.blocksFetcher.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
    log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

// onCheckStaleEvent is an event that allows marking stale epochs,
// so that they can be re-processed.
func onCheckStaleEvent(ctx context.Context) eventHandlerFn {

@@ -522,7 +522,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
        })
        assert.ErrorContains(t, beaconsync.ErrInvalidFetchedData.Error(), err)
        assert.Equal(t, stateScheduled, updatedState)
        assert.LogsContain(t, hook, "msg=\"Peer is penalized for invalid blocks\" pid=ZiCa")
        assert.LogsContain(t, hook, "Downscore peer")
    })

    t.Run("transition ok", func(t *testing.T) {

@@ -14,6 +14,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/paulbellamy/ratecounter"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
@@ -345,12 +346,11 @@ func isPunishableError(err error) bool {
func (s *Service) updatePeerScorerStats(data *blocksQueueFetchedData, count uint64, err error) {
    if isPunishableError(err) {
        if verification.IsBlobValidationFailure(err) {
            log.WithError(err).WithField("peer_id", data.blobsFrom).Warn("Downscoring peer for invalid blobs")
            s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(data.blobsFrom)
            s.downscorePeer(data.blobsFrom, "invalidBlobs")
        } else {
            log.WithError(err).WithField("peer_id", data.blocksFrom).Warn("Downscoring peer for invalid blocks")
            s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(data.blocksFrom)
            s.downscorePeer(data.blocksFrom, "invalidBlocks")
        }

        // If the error is punishable, exit here so that we don't give them credit for providing bad blocks.
        return
    }
@@ -376,3 +376,8 @@ func (s *Service) isProcessedBlock(ctx context.Context, blk blocks.ROBlock) bool
    }
    return false
}

func (s *Service) downscorePeer(peerID peer.ID, reason string) {
    newScore := s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(peerID)
    log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

@@ -6,6 +6,7 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/trailofbits/go-mutexasserts"
@@ -122,7 +123,7 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {

    collector, err := l.retrieveCollector(topic)
    if err != nil {
        return err
        return errors.Wrap(err, "retrieve collector")
    }

    remaining := collector.Remaining(remotePeer.String())
@@ -131,7 +132,7 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
        amt = 1
    }
    if amt > uint64(remaining) {
        l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
        l.downscorePeer(remotePeer, topic, "rateLimitExceeded")
        writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
        return p2ptypes.ErrRateLimited
    }
@@ -139,22 +140,20 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
}

// This is used to validate all incoming rpc streams from external peers.
func (l *limiter) validateRawRpcRequest(stream network.Stream) error {
func (l *limiter) validateRawRpcRequest(stream network.Stream, amt uint64) error {
    l.RLock()
    defer l.RUnlock()

    topic := rpcLimiterTopic

    collector, err := l.retrieveCollector(topic)
    remotePeer := stream.Conn().RemotePeer()
    collector, err := l.retrieveCollector(rpcLimiterTopic)
    if err != nil {
        return err
    }
    key := stream.Conn().RemotePeer().String()
    remaining := collector.Remaining(key)
    // Treat each request as a minimum of 1.
    amt := int64(1)
    if amt > remaining {
        l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())

    if amt > uint64(remaining) {
        l.downscorePeer(remotePeer, rpcLimiterTopic, "rawRateLimitExceeded")
        writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
        return p2ptypes.ErrRateLimited
    }
@@ -233,3 +232,13 @@ func (l *limiter) retrieveCollector(topic string) (*leakybucket.Collector, error
func (_ *limiter) topicLogger(topic string) *logrus.Entry {
    return log.WithField("rateLimiter", topic)
}

func (l *limiter) downscorePeer(peerID peer.ID, topic, reason string) {
    newScore := l.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
    log.WithFields(logrus.Fields{
        "peerID":   peerID.String(),
        "reason":   reason,
        "newScore": newScore,
        "topic":    topic,
    }).Debug("Downscore peer")
}
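For intuition about Remaining: the limiter keeps a leaky bucket per peer, so a request of size amt is admitted only while the bucket still has amt units of spare capacity. A self-contained toy version of that behavior (not Prysm's leakybucket package):

package main

import (
    "fmt"
    "time"
)

// toyBucket leaks `rate` units per second out of a bucket with fixed capacity.
type toyBucket struct {
    capacity, level float64
    rate            float64
    last            time.Time
}

// remaining drains the bucket for the elapsed time, then reports spare capacity.
func (b *toyBucket) remaining(now time.Time) float64 {
    b.level -= b.rate * now.Sub(b.last).Seconds()
    if b.level < 0 {
        b.level = 0
    }
    b.last = now
    return b.capacity - b.level
}

// allow admits a request of size amt if capacity remains, mirroring the
// `amt > remaining` rejection check in the limiter above.
func (b *toyBucket) allow(amt float64, now time.Time) bool {
    if amt > b.remaining(now) {
        return false
    }
    b.level += amt
    return true
}

func main() {
    b := &toyBucket{capacity: 2, rate: 1, last: time.Now()}
    fmt.Println(b.allow(1, time.Now())) // true
    fmt.Println(b.allow(2, time.Now())) // false: only ~1 unit of capacity left
}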
@@ -85,16 +85,16 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
    require.NoError(t, err, "could not create stream")

    for i := 0; i < 2*defaultBurstLimit; i++ {
        err = rlimiter.validateRawRpcRequest(stream)
        err = rlimiter.validateRawRpcRequest(stream, 1)
        rlimiter.addRawStream(stream)
        require.NoError(t, err, "could not validate incoming request")
    }
    // Triggers rate limit error on burst.
    assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
    assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))

    // Make Peer bad.
    for i := 0; i < defaultBurstLimit; i++ {
        assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
        assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
    }
    assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
    require.NoError(t, stream.Close(), "could not close stream")

@@ -20,6 +20,7 @@ import (
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/sirupsen/logrus"
)

var (
@@ -39,7 +40,7 @@ type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error

// rpcHandlerByTopicFromFork returns the RPC handlers for a given fork index.
func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandler, error) {
    // Fulu: https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#messages
    // Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#messages
    if forkIndex >= version.Fulu {
        return map[string]rpcHandler{
            p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
@@ -54,7 +55,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
        }, nil
    }

    // Electra: https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/p2p-interface.md#messages
    // Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/p2p-interface.md#messages
    if forkIndex >= version.Electra {
        return map[string]rpcHandler{
            p2p.RPCStatusTopicV1: s.statusRPCHandler,
@@ -68,7 +69,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
        }, nil
    }

    // Deneb: https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/p2p-interface.md#messages
    // Deneb: https://github.com/ethereum/consensus-specs/blob/master/specs/deneb/p2p-interface.md#messages
    if forkIndex >= version.Deneb {
        return map[string]rpcHandler{
            p2p.RPCStatusTopicV1: s.statusRPCHandler,
@@ -82,9 +83,9 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
        }, nil
    }

    // Capella: https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/p2p-interface.md#messages
    // Bellatrix: https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/p2p-interface.md#messages
    // Altair: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/p2p-interface.md#messages
    // Capella: https://github.com/ethereum/consensus-specs/blob/master/specs/capella/p2p-interface.md#messages
    // Bellatrix: https://github.com/ethereum/consensus-specs/blob/master/specs/bellatrix/p2p-interface.md#messages
    // Altair: https://github.com/ethereum/consensus-specs/blob/master/specs/altair/p2p-interface.md#messages
    if forkIndex >= version.Altair {
        handler := map[string]rpcHandler{
            p2p.RPCStatusTopicV1: s.statusRPCHandler,
@@ -105,7 +106,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
        return handler, nil
    }

    // Phase0: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#messages
    // Phase0: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#messages
    if forkIndex >= version.Phase0 {
        return map[string]rpcHandler{
            p2p.RPCStatusTopicV1: s.statusRPCHandler,
@@ -238,7 +239,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
        defer span.End()
        span.SetAttributes(trace.StringAttribute("topic", topic))
        span.SetAttributes(trace.StringAttribute("peer", remotePeer.String()))
        log := log.WithField("peer", stream.Conn().RemotePeer().String()).WithField("topic", string(stream.Protocol()))
        log := log.WithFields(logrus.Fields{"peer": remotePeer.String(), "topic": string(stream.Protocol())})

        // Check beforehand that the peer is valid.
        if err := s.cfg.p2p.Peers().IsBad(remotePeer); err != nil {
@@ -248,7 +249,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
            return
        }
        // Validate request according to peer limits.
        if err := s.rateLimiter.validateRawRpcRequest(stream); err != nil {
        if err := s.rateLimiter.validateRawRpcRequest(stream, 1); err != nil {
            log.WithError(err).Debug("Could not validate rpc request from peer")
            return
        }
@@ -304,7 +305,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
        if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
            logStreamErrors(err, topic)
            tracing.AnnotateError(span, err)
            s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
            s.downscorePeer(remotePeer, "registerRpcError")
            return
        }
        if err := handle(ctx, msg, stream); err != nil {
@@ -324,7 +325,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
        if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
            logStreamErrors(err, topic)
            tracing.AnnotateError(span, err)
            s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
            s.downscorePeer(remotePeer, "registerRpcError")
            return
        }
        if err := handle(ctx, nTyp.Elem().Interface(), stream); err != nil {
@@ -15,6 +15,7 @@ import (
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -43,7 +44,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
s.downscorePeer(remotePeer, "beaconBlocksByRangeRPCHandlerValidationError")
tracing.AnnotateError(span, err)
return err
}
@@ -201,3 +202,13 @@ func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch,

return nil
}

func (s *Service) downscorePeer(peerID peer.ID, reason string, fields ...logrus.Fields) {
log := log
for _, field := range fields {
log = log.WithFields(field)
}

newScore := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
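The new downscorePeer helper above replaces the scattered BadResponsesScorer().Increment calls and gives every penalty a machine-readable reason plus the resulting score. Its variadic fields parameter lets a call site attach extra structured context. A minimal, self-contained sketch of the same pattern, assuming nothing beyond logrus (names and values here are illustrative, not taken from the diff):

package main

import "github.com/sirupsen/logrus"

// downscore mimics the shape of the new helper: optional structured fields are
// merged into the log entry before the mandatory peerID/reason/newScore fields.
func downscore(peerID, reason string, newScore int, fields ...logrus.Fields) {
	entry := logrus.NewEntry(logrus.StandardLogger())
	for _, f := range fields {
		entry = entry.WithFields(f)
	}
	entry.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	// Extra context, as the ping handler does with sequence numbers.
	downscore("16Uiu2HAmExample", "pingInvalidSequenceNumber", 1, logrus.Fields{
		"storedSequenceNumber":   7,
		"incomingSequenceNumber": 3,
	})
}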
@@ -92,9 +92,11 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
return errors.New("no block roots provided")
}

remotePeer := stream.Conn().RemotePeer()

currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
if uint64(len(blockRoots)) > params.MaxRequestBlock(currentEpoch) {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.downscorePeer(remotePeer, "beaconBlocksRootRPCHandlerTooManyRoots")
s.writeErrorResponseToStream(responseCodeInvalidRequest, "requested more than the max block limit", stream)
return errors.New("requested more than the max block limit")
}
@@ -74,10 +74,13 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
}

remotePeer := stream.Conn().RemotePeer()

rp, err := validateBlobsByRange(r, s.cfg.chain.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.downscorePeer(remotePeer, "blobSidecarsByRangeRpcHandlerValidationError")
tracing.AnnotateError(span, err)
return err
}
@@ -87,7 +90,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
defer ticker.Stop()
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
if err != nil {
log.WithError(err).Info("error in BlobSidecarsByRange batch")
log.WithError(err).Error("Cannot create new block range batcher")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
@@ -112,7 +115,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
}
}
if err := batch.error(); err != nil {
log.WithError(err).Debug("error in BlobSidecarsByRange batch")
log.WithError(err).Debug("Error in BlobSidecarsByRange batch")

// If a rate limit is hit, it means an error response has already been sent and the stream has been closed.
if !errors.Is(err, p2ptypes.ErrRateLimited) {
@@ -39,7 +39,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
cs := s.cfg.clock.CurrentSlot()
remotePeer := stream.Conn().RemotePeer()
if err := validateBlobByRootRequest(blobIdents, cs); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
s.downscorePeer(remotePeer, "blobSidecarsByRootRpcHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return err
}
@@ -65,7 +65,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.chain.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
s.downscorePeer(remotePeer, "dataColumnSidecarsByRangeRpcHandlerValidationError")
tracing.AnnotateError(span, err)
return errors.Wrap(err, "validate data columns by range")
}
@@ -27,7 +27,7 @@ var (
)

// dataColumnSidecarByRootRPCHandler handles the data column sidecars by root RPC request.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.dataColumnSidecarByRootRPCHandler")
defer span.End()
@@ -42,7 +42,7 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
}

requestedColumnIdents := *ref
remotePeerId := stream.Conn().RemotePeer()
remotePeer := stream.Conn().RemotePeer()

ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
@@ -51,7 +51,7 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int

// Penalize peers that send invalid requests.
if err := validateDataColumnsByRootRequest(requestedColumnIdents); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeerId)
s.downscorePeer(remotePeer, "dataColumnSidecarByRootRPCHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return errors.Wrap(err, "validate data columns by root request")
}
@@ -85,7 +85,7 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
}

log := log.WithFields(logrus.Fields{
"peer": remotePeerId,
"peer": remotePeer,
"columns": requestedColumnsByRootLog,
})
@@ -13,6 +13,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -35,22 +36,40 @@ var backOffTime = map[primitives.SSZUint64]time.Duration{

// goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
const amount = 1
SetRPCStreamDeadlines(stream)
peerID := stream.Conn().RemotePeer()

m, ok := msg.(*primitives.SSZUint64)
if !ok {
return fmt.Errorf("wrong message type for goodbye, got %T, wanted *uint64", msg)
}
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
log.WithError(err).Debug("Goodbye message from rate-limited peer")
} else {

isRateLimitedPeer := false
if err := s.rateLimiter.validateRequest(stream, amount); err != nil {
if !errors.Is(err, p2ptypes.ErrRateLimited) {
return errors.Wrap(err, "validate request")
}
isRateLimitedPeer = true
}

if !isRateLimitedPeer {
s.rateLimiter.add(stream, 1)
}
log := log.WithField("Reason", goodbyeMessage(*m))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Peer has sent a goodbye message")
s.cfg.p2p.Peers().SetNextValidTime(stream.Conn().RemotePeer(), goodByeBackoff(*m))
// closes all streams with the peer
return s.cfg.p2p.Disconnect(stream.Conn().RemotePeer())

log.WithFields(logrus.Fields{
"peer": peerID,
"reason": goodbyeMessage(*m),
"isRateLimited": isRateLimitedPeer,
}).Debug("Received a goodbye message")

s.cfg.p2p.Peers().SetNextValidTime(peerID, goodByeBackoff(*m))

if err := s.cfg.p2p.Disconnect(peerID); err != nil {
return errors.Wrap(err, "disconnect")
}

return nil
}

// disconnectBadPeer checks whether peer is considered bad by some scorer, and tries to disconnect
@@ -70,7 +89,7 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID, badPeerErr
"peerID": id,
"agent": agentString(id, s.cfg.p2p.Host()),
}).
Debug("Sent peer disconnection")
Debug("Sent bad peer disconnection")
}

// A custom goodbye method that is used by our connection handler, in the
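The reworked goodbyeRPCHandler above makes a deliberate distinction: a rate-limited peer is still allowed to say goodbye (the violation is only noted in the log), while any other validation failure aborts the handler. A minimal standalone sketch of that branching, with a stand-in for p2ptypes.ErrRateLimited:

package main

import (
	"errors"
	"fmt"
)

// errRateLimited stands in for p2ptypes.ErrRateLimited.
var errRateLimited = errors.New("rate limited")

// handleGoodbye tolerates rate-limited peers but rejects other validation
// errors, mirroring the new control flow.
func handleGoodbye(validate func() error) (rateLimited bool, err error) {
	if err := validate(); err != nil {
		if !errors.Is(err, errRateLimited) {
			return false, fmt.Errorf("validate request: %w", err)
		}
		rateLimited = true // logged, but the goodbye is still processed
	}
	return rateLimited, nil
}

func main() {
	limited, err := handleGoodbye(func() error { return errRateLimited })
	fmt.Println(limited, err) // true <nil>: the goodbye is still processed
	_, err = handleGoodbye(func() error { return errors.New("malformed") })
	fmt.Println(err) // validate request: malformed
}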
@@ -26,7 +26,7 @@ func (s *Service) lightClientBootstrapRPCHandler(ctx context.Context, msg interf

SetRPCStreamDeadlines(stream)
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
logger.WithError(err).Error("s.rateLimiter.validateRequest")
logger.WithError(err).Error("Cannot validate request")
return err
}
s.rateLimiter.add(stream, 1)
@@ -42,7 +42,7 @@ func (s *Service) lightClientBootstrapRPCHandler(ctx context.Context, msg interf
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
logger.WithError(err).Error("s.cfg.beaconDB.LightClientBootstrap")
logger.WithError(err).Error("Cannot bootstrap light client")
return err
}
if bootstrap == nil {
@@ -74,10 +74,11 @@ func (s *Service) lightClientUpdatesByRangeRPCHandler(ctx context.Context, msg i
defer cancel()

logger := log.WithField("handler", p2p.LightClientUpdatesByRangeName[1:])
remotePeer := stream.Conn().RemotePeer()

SetRPCStreamDeadlines(stream)
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
logger.WithError(err).Error("s.rateLimiter.validateRequest")
logger.WithError(err).Error("Cannot validate request")
return err
}
s.rateLimiter.add(stream, 1)
@@ -90,7 +91,8 @@ func (s *Service) lightClientUpdatesByRangeRPCHandler(ctx context.Context, msg i

if r.Count == 0 {
s.writeErrorResponseToStream(responseCodeInvalidRequest, "count is 0", stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.downscorePeer(remotePeer, "lightClientUpdatesByRangeRPCHandlerCount0")

logger.Error("Count is 0")
return nil
}
@@ -102,7 +104,7 @@ func (s *Service) lightClientUpdatesByRangeRPCHandler(ctx context.Context, msg i
endPeriod, err := math.Add64(r.StartPeriod, r.Count-1)
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.downscorePeer(remotePeer, "lightClientUpdatesByRangeRPCHandlerEndPeriodOverflow")
tracing.AnnotateError(span, err)
logger.WithError(err).Error("End period overflows")
return err
@@ -114,7 +116,7 @@ func (s *Service) lightClientUpdatesByRangeRPCHandler(ctx context.Context, msg i
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
logger.WithError(err).Error("s.cfg.beaconDB.LightClientUpdates")
logger.WithError(err).Error("Cannot retrieve light client updates")
return err
}

@@ -153,7 +155,7 @@ func (s *Service) lightClientFinalityUpdateRPCHandler(ctx context.Context, _ int

SetRPCStreamDeadlines(stream)
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
logger.WithError(err).Error("s.rateLimiter.validateRequest")
logger.WithError(err).Error("Cannot validate request")
return err
}
s.rateLimiter.add(stream, 1)
@@ -191,7 +193,7 @@ func (s *Service) lightClientOptimisticUpdateRPCHandler(ctx context.Context, _ i

SetRPCStreamDeadlines(stream)
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
logger.WithError(err).Error("s.rateLimiter.validateRequest")
logger.WithError(err).Error("Cannot validate request")
return err
}
s.rateLimiter.add(stream, 1)
@@ -172,12 +172,12 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
// Read the METADATA response from the peer.
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.downscorePeer(peerID, "MetadataReadStatusCodeError")
return nil, errors.Wrap(err, "read status code")
}

if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.downscorePeer(peerID, "NonNullMetadataReadStatusCode")
return nil, errors.New(errMsg)
}

@@ -214,8 +214,8 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta

// Decode the metadata from the peer.
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
s.downscorePeer(peerID, "MetadataDecodeError")
return nil, errors.Wrap(err, "decode with max length")
}

return msg, nil
@@ -13,6 +13,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// pingHandler reads the incoming ping rpc message from the peer.
@@ -27,6 +28,7 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
if !ok {
return fmt.Errorf("wrong message type for ping, got %T, wanted *uint64", msg)
}
sequenceNumber := uint64(*m)

// Validate the incoming request regarding rate limiting.
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
@@ -39,14 +41,9 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
peerID := stream.Conn().RemotePeer()

// Check if the peer sequence number is higher than the one we have in our store.
valid, err := s.validateSequenceNum(*m, peerID)
valid, err := s.isSequenceNumberUpToDate(sequenceNumber, peerID)
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
}

s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
return errors.Wrap(err, "validate sequence number")
}

@@ -141,7 +138,7 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {

// If the peer responded with an error, increment the bad responses scorer.
if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.downscorePeer(peerID, "NotNullPingReadStatusCode")
return errors.Errorf("code: %d - %s", code, errMsg)
}

@@ -150,15 +147,11 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
return errors.Wrap(err, "decode sequence number")
}
sequenceNumber := uint64(*msg)

// Determine if the peer's sequence number returned by the peer is higher than the one we have in our store.
valid, err := s.validateSequenceNum(*msg, peerID)
valid, err := s.isSequenceNumberUpToDate(sequenceNumber, peerID)
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
}

return errors.Wrap(err, "validate sequence number")
}

@@ -180,27 +173,36 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
return nil
}

// validateSequenceNum validates the peer's sequence number.
// - If the peer's sequence number is greater than the sequence number we have in our store for the peer, return false.
// - If the peer's sequence number is equal to the sequence number we have in our store for the peer, return true.
// - If the peer's sequence number is less than the sequence number we have in our store for the peer, return an error.
func (s *Service) validateSequenceNum(seq primitives.SSZUint64, id peer.ID) (bool, error) {
// isSequenceNumberUpToDate checks whether our internal sequence number for the peer is up to date with respect to the incoming one.
// - If the incoming sequence number is greater than the sequence number we have in our store for the peer, return false.
// - If the incoming sequence number is equal to the sequence number we have in our store for the peer, return true.
// - If the incoming sequence number is less than the sequence number we have in our store for the peer, return an error.
func (s *Service) isSequenceNumberUpToDate(incomingSequenceNumber uint64, peerID peer.ID) (bool, error) {
// Retrieve the metadata for the peer we got in our store.
md, err := s.cfg.p2p.Peers().Metadata(id)
storedMetadata, err := s.cfg.p2p.Peers().Metadata(peerID)
if err != nil {
return false, errors.Wrap(err, "get metadata")
return false, errors.Wrap(err, "peers metadata")
}

// If we have no metadata for the peer, return false.
if md == nil || md.IsNil() {
if storedMetadata == nil || storedMetadata.IsNil() {
return false, nil
}

// The peer's sequence number must be less than or equal to the sequence number we have in our store.
if md.SequenceNumber() > uint64(seq) {
storedSequenceNumber := storedMetadata.SequenceNumber()
if storedSequenceNumber > incomingSequenceNumber {
s.downscorePeer(peerID, "pingInvalidSequenceNumber", logrus.Fields{
"storedSequenceNumber": storedSequenceNumber,
"incomingSequenceNumber": incomingSequenceNumber,
})
return false, p2ptypes.ErrInvalidSequenceNum
}

// Return true if the peer's sequence number is equal to the sequence number we have in our store.
return md.SequenceNumber() == uint64(seq), nil
// If this is the case, our information about the peer is outdated.
if storedSequenceNumber < incomingSequenceNumber {
return false, nil
}

return true, nil
}
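The renamed helper encodes a three-way comparison between the stored and incoming sequence numbers. A minimal standalone sketch of that decision table (the real method also handles missing metadata and downscores the offending peer):

package main

import "fmt"

// upToDate mirrors the decision table of isSequenceNumberUpToDate:
// stored > incoming  -> error (the peer sent an older sequence number than we recorded),
// stored < incoming  -> false (our metadata for the peer is outdated),
// stored == incoming -> true  (nothing to do).
func upToDate(stored, incoming uint64) (bool, error) {
	if stored > incoming {
		return false, fmt.Errorf("invalid sequence number: stored %d > incoming %d", stored, incoming)
	}
	if stored < incoming {
		return false, nil
	}
	return true, nil
}

func main() {
	for _, c := range []struct{ stored, incoming uint64 }{{5, 3}, {3, 5}, {5, 5}} {
		ok, err := upToDate(c.stored, c.incoming)
		fmt.Printf("stored=%d incoming=%d -> upToDate=%v err=%v\n", c.stored, c.incoming, ok, err)
	}
}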
@@ -35,6 +35,9 @@ func (s *Service) maintainPeerStatuses() {
wg.Add(1)
go func(id peer.ID) {
defer wg.Done()

log := log.WithField("peer", id)

// If our peer status has not been updated correctly we disconnect over here
// and set the connection state over here instead.
if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
@@ -43,27 +46,26 @@ func (s *Service) maintainPeerStatuses() {
log.WithError(err).Debug("Error when disconnecting with peer")
}
s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnected)
log.WithFields(logrus.Fields{
"peer": id,
"reason": "maintain peer statuses - peer is not connected",
}).Debug("Initiate peer disconnection")
log.WithField("reason", "maintainPeerStatusesNotConnectedPeer").Debug("Initiate peer disconnection")
return
}

// Disconnect from peers that are considered bad by any of the registered scorers.
if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
s.disconnectBadPeer(s.ctx, id, err)
return
}

// If the status hasn't been updated in the recent interval time.
lastUpdated, err := s.cfg.p2p.Peers().ChainStateLastUpdated(id)
if err != nil {
// Peer has vanished; nothing to do.
return
}

if prysmTime.Now().After(lastUpdated.Add(interval)) {
if err := s.reValidatePeer(s.ctx, id); err != nil {
log.WithField("peer", id).WithError(err).Debug("Could not revalidate peer")
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
log.WithError(err).Debug("Cannot re-validate peer")
}
}
}(pid)
@@ -128,19 +130,20 @@ func (s *Service) shouldReSync() bool {
}

// sendRPCStatusRequest for a given topic with an expected protobuf message type.
func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
func (s *Service) sendRPCStatusRequest(ctx context.Context, peer peer.ID) error {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

headRoot, err := s.cfg.chain.HeadRoot(ctx)
if err != nil {
return err
return errors.Wrap(err, "head root")
}

forkDigest, err := s.currentForkDigest()
if err != nil {
return err
return errors.Wrap(err, "current fork digest")
}

cp := s.cfg.chain.FinalizedCheckpt()
resp := &pb.Status{
ForkDigest: forkDigest[:],
@@ -149,37 +152,42 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
HeadRoot: headRoot,
HeadSlot: s.cfg.chain.HeadSlot(),
}

log := log.WithField("peer", peer)

topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
if err != nil {
return err
return errors.Wrap(err, "topic from message")
}
stream, err := s.cfg.p2p.Send(ctx, resp, topic, id)

stream, err := s.cfg.p2p.Send(ctx, resp, topic, peer)
if err != nil {
return err
return errors.Wrap(err, "send p2p message")
}
defer closeStream(stream, log)

code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return err
s.downscorePeer(peer, "statusRequestReadStatusCodeError")
return errors.Wrap(err, "read status code")
}

if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
s.downscorePeer(peer, "statusRequestNonNullStatusCode")
return errors.New(errMsg)
}

msg := &pb.Status{}
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return err
s.downscorePeer(peer, "statusRequestDecodeError")
return errors.Wrap(err, "decode status message")
}

// If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
err = s.validateStatusMessage(ctx, msg)
s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
s.disconnectBadPeer(s.ctx, id, err)
s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(peer, msg, err)
if err := s.cfg.p2p.Peers().IsBad(peer); err != nil {
s.disconnectBadPeer(s.ctx, peer, err)
}
return err
}
@@ -238,7 +246,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
return nil
default:
respCode = responseCodeInvalidRequest
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
s.downscorePeer(remotePeer, "statusRpcHandlerInvalidMessage")
}

originalErr := err
@@ -11,10 +11,12 @@ import (

lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
lru "github.com/hashicorp/golang-lru"
pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
gcache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
@@ -278,19 +280,32 @@ func (s *Service) Start() {
// Stop the regular sync service.
func (s *Service) Stop() error {
defer func() {
s.cancel()

if s.rateLimiter != nil {
s.rateLimiter.free()
}
}()

// Say goodbye to all peers.
for _, peerID := range s.cfg.p2p.Peers().Connected() {
if s.cfg.p2p.Host().Network().Connectedness(peerID) == network.Connected {
if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeClientShutdown, peerID); err != nil {
log.WithError(err).WithField("peerID", peerID).Error("Failed to send goodbye message")
}
}
}

// Removing RPC Stream handlers.
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
s.cfg.p2p.Host().RemoveStreamHandler(p)
}

// Deregister Topic Subscribers.
for _, t := range s.cfg.p2p.PubSub().GetTopics() {
s.unSubscribeFromTopic(t)
}
defer s.cancel()

return nil
}

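The reordered Stop above says goodbye to every connected peer before tearing down stream handlers and topic subscriptions, and only cancels the context and frees the rate limiter in a deferred block so those goodbyes can actually go out; this ordering is what the manu-peer-ban-at-restart changelog entry below refers to. A toy sketch of the shutdown sequence, with stand-in types:

package main

import "fmt"

type service struct{ peers []string }

func (s *service) stop() error {
	defer fmt.Println("cancel context, free rate limiter") // last, after goodbyes were sent
	for _, p := range s.peers {
		fmt.Println("goodbye ->", p) // first: tell peers this is a clean shutdown, not misbehavior
	}
	fmt.Println("remove stream handlers, unsubscribe from topics")
	return nil
}

func main() {
	s := &service{peers: []string{"peerA", "peerB"}}
	_ = s.stop()
}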
@@ -135,7 +135,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
blockSlot := block.Slot()
proposerIndex := block.ProposerIndex()

// Broadcast and save data columns sidecars to custody but not yet received.
// Broadcast and save data column sidecars to custody but not yet received.
sidecarCount := uint64(len(sidecars))
for columnIndex := range info.CustodyColumns {
log := log.WithField("columnIndex", columnIndex)
@@ -25,7 +25,7 @@ import (
"github.com/sirupsen/logrus"
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#the-gossip-domain-gossipsub
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#the-gossip-domain-gossipsub
func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
const dataColumnSidecarSubTopic = "/data_column_sidecar_%d/"

@@ -74,7 +74,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
verifier := s.newColumnsVerifier(roDataColumns, verification.GossipDataColumnSidecarRequirements)

// Start the verification process.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#the-gossip-domain-gossipsub
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id

// [REJECT] The sidecar is valid as verified by `verify_data_column_sidecar(sidecar)`.
if err := verifier.ValidFields(); err != nil {
@@ -86,7 +86,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, err
}

// [IGNORE] The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY`` allowance
// [IGNORE] The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
// -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
if err := verifier.NotFromFutureSlot(); err != nil {
return pubsub.ValidationIgnore, err
@@ -132,13 +132,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, err
}

// [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block
// [REJECT] The current `finalized_checkpoint` is an ancestor of the sidecar's block
// -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
if err := verifier.SidecarDescendsFromFinalized(); err != nil {
return pubsub.ValidationReject, err
}

// [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by `verify_data_column_sidecar_inclusion_proof(sidecar)`.
// [REJECT] The sidecar's `kzg_commitments` field inclusion proof is valid as verified by `verify_data_column_sidecar_inclusion_proof(sidecar)`.
if err := verifier.SidecarInclusionProven(); err != nil {
return pubsub.ValidationReject, err
}
@@ -154,7 +154,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationIgnore, nil
}

// [REJECT] The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by block_header.parent_root/block_header.slot).
// [REJECT] The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_header.parent_root`/`block_header.slot`).
// If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated
// -- in such a case do not REJECT, instead IGNORE this message.
if err := verifier.SidecarProposerExpected(ctx); err != nil {
@@ -22,7 +22,7 @@ import (
var (
// GossipDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received on gossip
// must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id
GossipDataColumnSidecarRequirements = []Requirement{
RequireValidFields,
RequireCorrectSubnet,
@@ -40,7 +40,7 @@ var (

// ByRangeRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
// via the by range request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
ByRangeRequestDataColumnSidecarRequirements = []Requirement{
RequireValidFields,
RequireSidecarInclusionProven,
@@ -165,7 +165,7 @@ func (dv *RODataColumnsVerifier) NotFromFutureSlot() (err error) {
// Extract the data column slot.
dataColumnSlot := dataColumn.Slot()

// Skip if the data column slotis the same as the current slot.
// Skip if the data column slot is the same as the current slot.
if currentSlot == dataColumnSlot {
continue
}
@@ -174,7 +174,7 @@ func (dv *RODataColumnsVerifier) NotFromFutureSlot() (err error) {
// We lower the time by MAXIMUM_GOSSIP_CLOCK_DISPARITY in case system time is running slightly behind real time.
earliestStart, err := dv.clock.SlotStart(dataColumnSlot)
if err != nil {
return fmt.Errorf("failed to determine slot start time from clock waiter: %w", err)
return columnErrBuilder(errors.Wrap(err, "failed to determine slot start time from clock waiter"))
}
earliestStart = earliestStart.Add(-maximumGossipClockDisparity)

@@ -204,11 +204,8 @@ func (dv *RODataColumnsVerifier) SlotAboveFinalized() (err error) {
}

for _, dataColumn := range dv.dataColumns {
// Extract the data column slot.
dataColumnSlot := dataColumn.Slot()

// Check if the data column slot is after first slot of the epoch corresponding to the finalized checkpoint.
if dataColumnSlot <= startSlot {
if dataColumn.Slot() <= startSlot {
return columnErrBuilder(errSlotNotAfterFinalized)
}
}
@@ -271,10 +268,8 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
defer dv.recordResult(RequireSidecarParentSeen, &err)

for _, dataColumn := range dv.dataColumns {
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

// Skip if the parent root has been seen.
parentRoot := dataColumn.ParentRoot()
if parentSeen != nil && parentSeen(parentRoot) {
continue
}
@@ -295,10 +290,7 @@ func (dv *RODataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.
defer dv.recordResult(RequireSidecarParentValid, &err)

for _, dataColumn := range dv.dataColumns {
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

if badParent != nil && badParent(parentRoot) {
if badParent != nil && badParent(dataColumn.ParentRoot()) {
return columnErrBuilder(errSidecarParentInvalid)
}
}
@@ -314,21 +306,15 @@ func (dv *RODataColumnsVerifier) SidecarParentSlotLower() (err error) {
defer dv.recordResult(RequireSidecarParentSlotLower, &err)

for _, dataColumn := range dv.dataColumns {
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

// Compute the slot of the parent block.
parentSlot, err := dv.fc.Slot(parentRoot)
parentSlot, err := dv.fc.Slot(dataColumn.ParentRoot())
if err != nil {
return columnErrBuilder(errors.Wrap(err, "slot"))
}

// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()

// Check if the data column slot is after the parent slot.
if parentSlot >= dataColumnSlot {
return errSlotNotAfterParent
if parentSlot >= dataColumn.Slot() {
return columnErrBuilder(errSlotNotAfterParent)
}
}

@@ -435,7 +421,7 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return [fieldparams.RootLength]byte{}, errors.Wrap(err, "target root from epoch")
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}

// Store the target root in the cache.
@@ -534,7 +520,7 @@ func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {

root, err := c.SignedBlockHeader.HashTreeRoot()
if err != nil {
return [160]byte{}, errors.Wrap(err, "hash tree root")
return [160]byte{}, columnErrBuilder(errors.Wrap(err, "hash tree root"))
}

for i := range c.KzgCommitmentsInclusionProof {
3
changelog/James-prysm_persistent-seq-number.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Changed the discv5 database from in-memory to persistent so that local node information survives restarts, keeping the ENR sequence number deterministic.
3
changelog/add-blob-schedule-to-config-spec-endpoint.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Add BLOB_SCHEDULE field to `/eth/v1/config/spec` endpoint response to expose blob scheduling configuration for networks.
3
changelog/james-prysm_proposer-lookahead-api.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Implements the `/eth/v1/beacon/states/{state_id}/proposer_lookahead` beacon API endpoint.
7
changelog/james-prysm_remove-ssz-only-flag.md
Normal file
@@ -0,0 +1,7 @@
### Removed

- Partially reverted PR #15390, removing the `ssz-only` debug flag until there is a real use case for it.

### Added

- Added a new PRYSM_API_OVERRIDE_ACCEPT environment variable to override the SSZ accept header, replacing the removed flag.
3
changelog/jtraglia_das-core-fixes.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed variable names, links, and typos in das core code.
3
changelog/jtraglia_dev-to-master.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Update links to consensus-specs to point to the `master` branch.
3
changelog/jtraglia_move-reconstruction-lock.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Moved the reconstruction lock to prevent unnecessary work.
3
changelog/jtraglia_nits-dcsc-verification.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Fix some nits associated with data column sidecar verification.
3
changelog/jtraglia_update-within-da-period.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Use `MinEpochsForDataColumnSidecarsRequest` in `WithinDAPeriod` when in Fulu.
2
changelog/manu-peer-ban-at-restart.md
Normal file
@@ -0,0 +1,2 @@
### Fixed
- Fixed various reasons why a node is banned by its peers when it stops.
2
changelog/potuz_add_hashtree.md
Normal file
@@ -0,0 +1,2 @@
### Added
- Add optional `--use-hashtree` flag to beacon-chain and validator clients for using the hashtree library instead of gohashtree for vectorized SHA-256 merkle tree hashing. The hashtree library provides optimized assembly implementations for x86_64 and ARM64 architectures without CGO overhead.
3
changelog/potuz_add_publishv2_metric.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Add timing metric `publish_block_v2_duration_milliseconds` to measure processing duration of the `PublishBlockV2` beacon API endpoint.
3
changelog/radek_consensus-value-unavailable.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Return zero value for `Eth-Consensus-Block-Value` on error to avoid missed block proposals.
3
changelog/raulk_beacon-api-metadata.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Added new metadata fields (attnets, syncnets, custody_group_count) to `/eth/v1/node/identity`.
3
changelog/tt_state_root_debug.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Include the state root in StateNotFoundError for better debugging of consensus validation failures.
@@ -14,6 +14,7 @@ go_library(
"//cmd:go_default_library",
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
"//config/params:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
@@ -27,6 +27,7 @@ import (

"github.com/OffchainLabs/prysm/v6/cmd"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
@@ -51,7 +52,6 @@ type Flags struct {
EnableExperimentalAttestationPool bool // EnableExperimentalAttestationPool enables an experimental attestation pool design.
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
SSZOnly bool // SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled (useful for debugging)
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
EnableFullSSZDataLogging bool // Enables logging for full ssz data on rejected gossip messages
@@ -276,6 +276,13 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.ForceHead = ctx.String(forceHeadFlag.Name)
}

if ctx.Bool(UseHashtreeFlag.Name) {
logEnabled(UseHashtreeFlag)
htr.SetUseHashtree(true)
} else {
log.Info("Using gohashtree library for vectorized SHA-256 hashing")
}

if ctx.IsSet(blacklistRoots.Name) {
logEnabled(blacklistRoots)
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
@@ -332,6 +339,12 @@ func ConfigureValidator(ctx *cli.Context) error {
logEnabled(EnableBeaconRESTApi)
cfg.EnableBeaconRESTApi = true
}
if ctx.Bool(UseHashtreeFlag.Name) {
logEnabled(UseHashtreeFlag)
htr.SetUseHashtree(true)
} else {
log.Info("Using gohashtree library for vectorized SHA-256 hashing")
}
if ctx.Bool(DisableDutiesV2.Name) {
logEnabled(DisableDutiesV2)
cfg.DisableDutiesV2 = true
@@ -340,10 +353,6 @@ func ConfigureValidator(ctx *cli.Context) error {
logEnabled(EnableWebFlag)
cfg.EnableWeb = true
}
if ctx.Bool(SSZOnly.Name) {
logEnabled(SSZOnly)
cfg.SSZOnly = true
}

cfg.KeystoreImportDebounceInterval = ctx.Duration(dynamicKeyReloadDebounceInterval.Name)
Init(cfg)
@@ -198,10 +198,11 @@ var (
Value: false,
}

// SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled
SSZOnly = &cli.BoolFlag{
Name: "ssz-only",
Usage: "(debug): Forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled",
// UseHashtreeFlag enables using the hashtree library instead of gohashtree for vectorized hashing.
UseHashtreeFlag = &cli.BoolFlag{
Name: "use-hashtree",
Usage: "Uses the hashtree library instead of gohashtree for vectorized SHA-256 merkle tree hashing.",
Value: false,
}
)

@@ -225,7 +226,7 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
EnableBeaconRESTApi,
DisableDutiesV2,
EnableWebFlag,
SSZOnly,
UseHashtreeFlag,
}...)

// E2EValidatorFlags contains a list of the validator feature flags to be tested in E2E.
@@ -264,6 +265,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
UseHashtreeFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
@@ -297,7 +297,7 @@ type BeaconChainConfig struct {
NodeIdBits uint64 `yaml:"NODE_ID_BITS" spec:"true"` // NodeIdBits defines the bit length of a node id.

// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE"`
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`

// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
@@ -336,8 +336,8 @@ func (b *BeaconChainConfig) ExecutionRequestLimits() enginev1.ExecutionRequestLi
}

type BlobScheduleEntry struct {
Epoch primitives.Epoch `yaml:"EPOCH"`
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK"`
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
}

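With the json tags added above, a BLOB_SCHEDULE entry serializes with the same upper-case keys in YAML config files and in the JSON returned by the spec endpoint. A minimal sketch of the round trip (plain uint64 stands in for primitives.Epoch; the epoch and blob counts are illustrative only):

package main

import (
	"encoding/json"
	"fmt"
)

// blobScheduleEntry mirrors the tags on BlobScheduleEntry above.
type blobScheduleEntry struct {
	Epoch            uint64 `yaml:"EPOCH" json:"EPOCH"`
	MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
}

func main() {
	schedule := []blobScheduleEntry{
		{Epoch: 269568, MaxBlobsPerBlock: 6}, // illustrative values
		{Epoch: 364032, MaxBlobsPerBlock: 9},
	}
	out, err := json.Marshal(schedule)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [{"EPOCH":269568,"MAX_BLOBS_PER_BLOCK":6},{"EPOCH":364032,"MAX_BLOBS_PER_BLOCK":9}]
}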
// InitializeForkSchedule initializes the scheduled forks baked into the config.
@@ -498,7 +498,11 @@ func FuluEnabled() bool {
return BeaconConfig().FuluForkEpoch < math.MaxUint64
}

// WithinDAPeriod checks if the block epoch is within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS of the given current epoch.
// WithinDAPeriod checks if the block epoch is within the data availability retention period.
func WithinDAPeriod(block, current primitives.Epoch) bool {
if block >= BeaconConfig().FuluForkEpoch {
return block+BeaconConfig().MinEpochsForDataColumnSidecarsRequest >= current
}

return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}
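A worked example of the retention check: a block epoch passes while block+window >= current. The constants below are stand-ins (4096 is the mainnet MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; the Fulu fork epoch and the column window are assumed placeholders, not confirmed values):

package main

import "fmt"

const (
	fuluForkEpoch    = 411072 // assumed placeholder, not a confirmed value
	minEpochsBlobs   = 4096   // MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS on mainnet
	minEpochsColumns = 4096   // stand-in for MinEpochsForDataColumnSidecarsRequest
)

// withinDAPeriod mirrors the updated function: blocks at or after the Fulu fork
// use the data-column retention window, earlier blocks use the blob window.
func withinDAPeriod(block, current uint64) bool {
	if block >= fuluForkEpoch {
		return block+minEpochsColumns >= current
	}
	return block + minEpochsBlobs >= current
}

func main() {
	fmt.Println(withinDAPeriod(100000, 100000+4096)) // true: exactly at the edge of the window
	fmt.Println(withinDAPeriod(100000, 100000+4097)) // false: one epoch past the window
}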
@@ -4,6 +4,10 @@ import (
"testing"
)

const (
EnvNameOverrideAccept = "PRYSM_API_OVERRIDE_ACCEPT"
)

// SetupTestConfigCleanup preserves configurations, allowing tests to modify them without
// restriction; everything is restored after the test.
func SetupTestConfigCleanup(t testing.TB) {
@@ -1,11 +1,16 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

# gazelle:resolve go github.com/prysmaticlabs/hashtree //third_party/hashtree:go_default_library

go_library(
name = "go_default_library",
srcs = ["hashtree.go"],
importpath = "github.com/OffchainLabs/prysm/v6/crypto/hash/htr",
visibility = ["//visibility:public"],
deps = ["@com_github_prysmaticlabs_gohashtree//:go_default_library"],
deps = [
"//third_party/hashtree:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
)

go_test(
@@ -5,27 +5,52 @@ import (
"sync"

"github.com/prysmaticlabs/gohashtree"
hashtreelib "github.com/prysmaticlabs/hashtree"
)

const minSliceSizeToParallelize = 5000

// HashFunc defines the interface for vectorized hash implementations
type HashFunc func(output [][32]byte, input [][32]byte) error

var (
// currentHashFunc holds the active hash implementation
currentHashFunc HashFunc = gohashtree.Hash

// useHashtree flag determines which implementation to use
useHashtree bool = false
)

// SetUseHashtree configures whether to use the hashtree library (true) or gohashtree (false)
func SetUseHashtree(use bool) {
useHashtree = use
if use {
currentHashFunc = hashtreelib.Hash
} else {
currentHashFunc = gohashtree.Hash
}
}

// GetUseHashtree returns the current hashtree usage setting
func GetUseHashtree() bool {
return useHashtree
}

func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGroup) {
defer wg.Done()
err := gohashtree.Hash(outputList, inputList)
err := currentHashFunc(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
}

// VectorizedSha256 takes a list of roots and hashes them using CPU
// specific vector instructions. Depending on host machine's specific
// hardware configuration, using this routine can lead to a significant
// performance improvement compared to the default method of hashing
// lists.
// specific vector instructions. Uses either gohashtree or hashtree
// implementation based on the current configuration.
func VectorizedSha256(inputList [][32]byte) [][32]byte {
outputList := make([][32]byte, len(inputList)/2)
if len(inputList) < minSliceSizeToParallelize {
err := gohashtree.Hash(outputList, inputList)
err := currentHashFunc(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -38,7 +63,7 @@ func VectorizedSha256(inputList [][32]byte) [][32]byte {
for j := 0; j < n; j++ {
go hashParallel(inputList[j*2*groupSize:(j+1)*2*groupSize], outputList[j*groupSize:], &wg)
}
err := gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
err := currentHashFunc(outputList[n*groupSize:], inputList[n*2*groupSize:])
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
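The switch above is a plain package-level function pointer: SetUseHashtree is called once at startup by the flag handlers, and every later VectorizedSha256 call goes through whichever implementation was installed. A standalone sketch of the same indirection (the sequential SHA-256 fallback is illustrative, not the real gohashtree code):

package main

import (
	"crypto/sha256"
	"fmt"
)

type hashFunc func(output, input [][32]byte) error

// sequentialHash is a stand-in implementation: hash each pair of 32-byte inputs.
func sequentialHash(output, input [][32]byte) error {
	for i := 0; i < len(input)/2; i++ {
		output[i] = sha256.Sum256(append(input[2*i][:], input[2*i+1][:]...))
	}
	return nil
}

// current is swapped exactly once at startup, as htr does with currentHashFunc.
var current hashFunc = sequentialHash

func main() {
	input := make([][32]byte, 4)
	output := make([][32]byte, len(input)/2)
	if err := current(output, input); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", output[0])
}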
@@ -1,6 +1,7 @@
|
||||
package htr
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
@@ -25,3 +26,247 @@ func Test_VectorizedSha256(t *testing.T) {
|
||||
require.Equal(t, r, hash2[i])
|
||||
}
|
||||
}
|
||||
|
||||
// generateTestData creates random test data for hashing tests
|
||||
func generateTestData(size int) [][32]byte {
|
||||
data := make([][32]byte, size)
|
||||
for i := range data {
|
||||
_, err := rand.Read(data[i][:])
|
||||
if err != nil {
|
||||
panic(err) // This should never happen in tests
|
||||
}
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// Test_GohashtreeVsHashtree verifies both implementations produce identical results
|
||||
func Test_GohashtreeVsHashtree(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
size int
|
||||
}{
|
||||
{"small", 100},
|
||||
{"medium", 1000},
|
||||
{"large", 10000},
|
||||
{"very_large", 50000},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Generate test data (must be even number for hash pairs)
|
||||
input := generateTestData(tt.size * 2)
|
||||
|
||||
// Test with gohashtree (default)
|
||||
SetUseHashtree(false)
|
||||
gohashtreeResult := VectorizedSha256(input)
|
||||
|
||||
// Test with hashtree
|
||||
SetUseHashtree(true)
|
||||
hashtreeResult := VectorizedSha256(input)
|
||||
|
||||
// Reset to default
|
||||
SetUseHashtree(false)
|
||||
|
||||
// Results should be identical
|
||||
require.Equal(t, len(gohashtreeResult), len(hashtreeResult), "Result lengths should match")
|
||||
for i := range gohashtreeResult {
|
||||
require.Equal(t, gohashtreeResult[i], hashtreeResult[i], "Hash results should be identical at index %d", i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test_GohashtreeImplementation tests gohashtree specifically
|
||||
func Test_GohashtreeImplementation(t *testing.T) {
|
||||
// Force gohashtree
|
||||
SetUseHashtree(false)
|
||||
defer SetUseHashtree(false) // Reset after test
|
||||
|
||||
require.Equal(t, false, GetUseHashtree(), "Should be using gohashtree")
|
||||
|
||||
// Test small input (non-parallel path)
|
||||
smallInput := generateTestData(10)
|
||||
smallResult := VectorizedSha256(smallInput)
|
||||
require.Equal(t, 5, len(smallResult), "Small input should produce correct number of hashes")
|
||||
|
||||
// Test large input (parallel path)
|
||||
largeInput := generateTestData(minSliceSizeToParallelize + 100)
|
||||
largeResult := VectorizedSha256(largeInput)
|
||||
expectedLen := (minSliceSizeToParallelize + 100) / 2
|
||||
require.Equal(t, expectedLen, len(largeResult), "Large input should produce correct number of hashes")
|
||||
}
|
||||
|
||||
// Test_HashtreeImplementation tests hashtree specifically
|
||||
func Test_HashtreeImplementation(t *testing.T) {
|
||||
// Force hashtree
|
||||
SetUseHashtree(true)
|
||||
defer SetUseHashtree(false) // Reset after test
|
||||
|
||||
require.Equal(t, true, GetUseHashtree(), "Should be using hashtree")
|
||||
|
||||
// Test small input
|
||||
smallInput := generateTestData(10)
|
||||
smallResult := VectorizedSha256(smallInput)
|
||||
require.Equal(t, 5, len(smallResult), "Small input should produce correct number of hashes")
|
||||
|
||||
// Test large input
|
||||
largeInput := generateTestData(minSliceSizeToParallelize + 100)
|
||||
largeResult := VectorizedSha256(largeInput)
|
||||
expectedLen := (minSliceSizeToParallelize + 100) / 2
|
||||
require.Equal(t, expectedLen, len(largeResult), "Large input should produce correct number of hashes")
|
||||
}
|
||||
|
||||
// Test_ThreadSafety verifies both implementations work correctly with concurrent access
|
||||
func Test_ThreadSafety(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
useHashtree bool
|
||||
}{
|
||||
{"gohashtree_concurrent", false},
|
||||
{"hashtree_concurrent", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
SetUseHashtree(tt.useHashtree)
|
||||
defer SetUseHashtree(false)
|
||||
|
||||
const numGoroutines = 10
|
||||
const inputSize = 1000
|
||||
|
||||
results := make([][][32]byte, numGoroutines)
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
// Run concurrent hashing
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
input := generateTestData(inputSize)
|
||||
results[index] = VectorizedSha256(input)
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Verify all results have correct length
|
||||
expectedLen := inputSize / 2
|
||||
for i, result := range results {
|
||||
require.Equal(t, expectedLen, len(result), "Result %d should have correct length", i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// Benchmark_GohashtreeSmall benchmarks gohashtree with small input
func Benchmark_GohashtreeSmall(b *testing.B) {
	SetUseHashtree(false)
	input := generateTestData(100)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}

// Benchmark_HashtreeSmall benchmarks hashtree with small input
func Benchmark_HashtreeSmall(b *testing.B) {
	SetUseHashtree(true)
	defer SetUseHashtree(false)
	input := generateTestData(100)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}
// Benchmark_GohashtreeMedium benchmarks gohashtree with medium input
func Benchmark_GohashtreeMedium(b *testing.B) {
	SetUseHashtree(false)
	input := generateTestData(2000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}

// Benchmark_HashtreeMedium benchmarks hashtree with medium input
func Benchmark_HashtreeMedium(b *testing.B) {
	SetUseHashtree(true)
	defer SetUseHashtree(false)
	input := generateTestData(2000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}
// Benchmark_GohashtreeLarge benchmarks gohashtree with large input (parallel path)
func Benchmark_GohashtreeLarge(b *testing.B) {
	SetUseHashtree(false)
	input := generateTestData(minSliceSizeToParallelize + 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}

// Benchmark_HashtreeLarge benchmarks hashtree with large input (parallel path)
func Benchmark_HashtreeLarge(b *testing.B) {
	SetUseHashtree(true)
	defer SetUseHashtree(false)
	input := generateTestData(minSliceSizeToParallelize + 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}
// Benchmark_GohashtreeVeryLarge benchmarks gohashtree with very large input
func Benchmark_GohashtreeVeryLarge(b *testing.B) {
	SetUseHashtree(false)
	input := generateTestData(50000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}

// Benchmark_HashtreeVeryLarge benchmarks hashtree with very large input
func Benchmark_HashtreeVeryLarge(b *testing.B) {
	SetUseHashtree(true)
	defer SetUseHashtree(false)
	input := generateTestData(50000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VectorizedSha256(input)
	}
}
// Benchmark_Comparison runs both implementations side by side for direct comparison
func Benchmark_Comparison(b *testing.B) {
	input := generateTestData(10000)

	b.Run("gohashtree", func(b *testing.B) {
		SetUseHashtree(false)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_ = VectorizedSha256(input)
		}
	})

	b.Run("hashtree", func(b *testing.B) {
		SetUseHashtree(true)
		defer SetUseHashtree(false)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_ = VectorizedSha256(input)
		}
	})
}
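Because Benchmark_Comparison uses sub-benchmarks, a single run such as go test -bench=Benchmark_Comparison -benchmem reports Benchmark_Comparison/gohashtree and Benchmark_Comparison/hashtree as adjacent lines, which is the easiest way to read the two backends against each other at one input size; the per-size benchmarks above cover the serial and parallel paths separately.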
@@ -27,7 +27,7 @@ type VersionedUnmarshaler struct {
	// Fork aligns with the fork names in config/params/values.go
	Fork int
	// Version corresponds to the Version type defined in the beacon-chain spec, aka a "fork version number":
	// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
	// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#custom-types
	Version [fieldparams.VersionLength]byte
}
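The hunk above only repoints the spec link from the dev branch to master. For orientation, the struct pairs the fork enum with the 4-byte fork version; an illustrative literal, assuming fieldparams.VersionLength is 4 and borrowing mainnet's genesis fork version:

// Values are illustrative assumptions, not taken from this diff.
vu := VersionedUnmarshaler{
	Fork:    0,                   // e.g. phase0, per the fork names in config/params/values.go
	Version: [4]byte{0, 0, 0, 0}, // mainnet genesis fork version 0x00000000
}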
1
go.mod
1
go.mod
@@ -61,6 +61,7 @@ require (
|
||||
github.com/prometheus/prom2json v1.3.0
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20241008181541-518c4ce73516
|
||||
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e
|
||||
github.com/prysmaticlabs/hashtree v0.2.0
|
||||
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
|
||||
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294
|
||||
github.com/r3labs/sse/v2 v2.10.0
|
||||
|
||||