Compare commits


1 commit

Author          SHA1          Message                                          Date
terence tsao    3120746e59    ssz interop write: only capture invalid block    2024-03-11 08:22:57 -10:00
184 changed files with 9450 additions and 6209 deletions


@@ -6,12 +6,6 @@ import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# hermetic_cc_toolchain v3.0.1 required changes.
common --enable_platform_specific_config
build:linux --sandbox_add_mount_pair=/tmp
build:macos --sandbox_add_mount_pair=/var/tmp
build:windows --sandbox_add_mount_pair=C:\Temp
# E2E run with debug gotag
test:e2e --define gotags=debug


@@ -1 +1 @@
7.1.0
7.0.0

MODULE.bazel.lock (generated, 821 changed lines)

File diff suppressed because it is too large.


@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)


@@ -16,14 +16,12 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
)
@@ -83,10 +81,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
],
)
@@ -153,13 +151,17 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
],
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
@@ -198,14 +200,10 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.8",
go_version = "1.21.6",
nogo = "@//:nogo",
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -243,7 +241,9 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0"
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
bls_test_version = "v0.1.1"
@@ -260,7 +260,7 @@ filegroup(
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -276,7 +276,7 @@ filegroup(
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -292,7 +292,7 @@ filegroup(
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -306,7 +306,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -6,14 +6,12 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"health.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/iface:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -39,12 +37,10 @@ go_test(
srcs = [
"checkpoint_test.go",
"client_test.go",
"health_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -58,6 +54,5 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)


@@ -1,55 +0,0 @@
package beacon
import (
"context"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node iface.HealthNode
sync.RWMutex
}
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy() bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.RLock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
if isStatusChanged {
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
n.isHealthy = &newStatus
n.Unlock() // It's better to unlock as soon as the protected section is over.
n.healthChan <- newStatus
}
return newStatus
}
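For context, the tracker deleted in this diff is meant to be driven by pairing CheckHealth with a consumer of the HealthUpdates channel. A minimal usage sketch against the API shown above; the fakeNode type is hypothetical and exists only to satisfy iface.HealthNode:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

// fakeNode is a hypothetical stand-in satisfying iface.HealthNode.
type fakeNode struct{ healthy bool }

func (f *fakeNode) IsHealthy(_ context.Context) bool { return f.healthy }

func main() {
	node := &fakeNode{healthy: true}
	tracker := beacon.NewNodeHealthTracker(node)

	// Consume status-change notifications so CheckHealth never blocks
	// on the single-slot buffered channel.
	go func() {
		for healthy := range tracker.HealthUpdates() {
			fmt.Println("health changed:", healthy)
		}
	}()

	for i := 0; i < 3; i++ {
		tracker.CheckHealth(context.Background())
		node.healthy = !node.healthy
		time.Sleep(50 * time.Millisecond)
	}
}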


@@ -1,118 +0,0 @@
package beacon
import (
"context"
"sync"
"testing"
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(context.Background())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
n := NewNodeHealthTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
n.CheckHealth(context.Background())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy() // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}


@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["health.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
visibility = ["//visibility:public"],
)


@@ -1,13 +0,0 @@
package iface
import "context"
type HealthTracker interface {
HealthUpdates() <-chan bool
IsHealthy() bool
CheckHealth(ctx context.Context) bool
}
type HealthNode interface {
IsHealthy(ctx context.Context) bool
}


@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon/iface:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)


@@ -1,53 +0,0 @@
package testing
import (
"context"
"reflect"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
)
var (
_ = iface.HealthNode(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}


@@ -304,8 +304,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -343,8 +341,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -383,8 +379,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
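The versionOpt closures above follow a small functional-options pattern for decorating an *http.Request before c.do sends it. A self-contained sketch of the same pattern; the helper names and URL here are illustrative, not the builder client's actual internals:

package main

import (
	"fmt"
	"net/http"
)

// reqOpt mutates a request before it is sent, mirroring the versionOpt
// closures in the hunks above.
type reqOpt func(r *http.Request)

func withConsensusVersion(v string) reqOpt {
	return func(r *http.Request) { r.Header.Add("Eth-Consensus-Version", v) }
}

func newRequest(method, url string, opts ...reqOpt) (*http.Request, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	return req, nil
}

func main() {
	req, err := newRequest(http.MethodPost,
		"http://localhost:3500/eth/v1/builder/blinded_blocks",
		withConsensusVersion("deneb"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Eth-Consensus-Version")) // deneb
}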


@@ -321,8 +321,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -349,8 +347,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -380,8 +376,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)


@@ -21,9 +21,6 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// ErrConnectionIssue represents a connection problem.
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
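Because ErrNotFound wraps ErrNotOK via errors.Wrap, callers can branch on the sentinel with errors.Is. A minimal sketch, assuming pkg/errors wrapping (which supports the standard Unwrap protocol):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var errNotOK = stderrors.New("did not receive 2xx response from API")
var errNotFound = errors.Wrap(errNotOK, "recv 404 NotFound response from API")

func main() {
	// errors.Wrap preserves the cause, so the sentinel stays detectable.
	fmt.Println(stderrors.Is(errNotFound, errNotOK)) // true
}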


@@ -1,24 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["event_stream.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["event_stream_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)


@@ -1,148 +0,0 @@
package event
import (
"bufio"
"context"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
log "github.com/sirupsen/logrus"
)
const (
EventHead = "head"
EventBlock = "block"
EventAttestation = "attestation"
EventVoluntaryExit = "voluntary_exit"
EventBlsToExecutionChange = "bls_to_execution_change"
EventProposerSlashing = "proposer_slashing"
EventAttesterSlashing = "attester_slashing"
EventFinalizedCheckpoint = "finalized_checkpoint"
EventChainReorg = "chain_reorg"
EventContributionAndProof = "contribution_and_proof"
EventLightClientFinalityUpdate = "light_client_finality_update"
EventLightClientOptimisticUpdate = "light_client_optimistic_update"
EventPayloadAttributes = "payload_attributes"
EventBlobSidecar = "blob_sidecar"
EventError = "error"
EventConnectionError = "connection_error"
)
var (
_ = EventStreamClient(&EventStream{})
)
var DefaultEventTopics = []string{EventHead}
type EventStreamClient interface {
Subscribe(eventsChannel chan<- *Event)
}
type Event struct {
EventType string
Data []byte
}
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
ctx context.Context
httpClient *http.Client
host string
topics []string
}
func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
// Check if the host is a valid URL
_, err := url.ParseRequestURI(host)
if err != nil {
return nil, err
}
if len(topics) == 0 {
return nil, errors.New("no topics provided")
}
return &EventStream{
ctx: ctx,
httpClient: httpClient,
host: host,
topics: topics,
}, nil
}
func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
allTopics := strings.Join(h.topics, ",")
log.WithField("topics", allTopics).Info("Listening to Beacon API events")
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
}
defer func() {
if closeErr := resp.Body.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close events response body")
}
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
var eventType, data string // Variables to store event type and data
// Iterate over lines of the event stream
for scanner.Scan() {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
close(eventsChannel)
return
default:
line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
}
// Reset eventType and data for the next event
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
// Extract event type from the "event" field
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
// Extract data from the "data" field
data = d
}
}
}
if err := scanner.Err(); err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
}
}
}
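A minimal subscriber sketch against the EventStream API shown above; the host and event count are illustrative:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v5/api/client/event"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, err := event.NewEventStream(ctx, http.DefaultClient,
		"http://localhost:3500", []string{event.EventHead})
	if err != nil {
		panic(err)
	}

	ch := make(chan *event.Event, 1)
	go stream.Subscribe(ch)

	// Print the first few head events, then stop.
	for i := 0; i < 3; i++ {
		ev := <-ch
		fmt.Println(ev.EventType, string(ev.Data))
	}
}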


@@ -1,80 +0,0 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNewEventStream(t *testing.T) {
validURL := "http://localhost:8080"
invalidURL := "://invalid"
topics := []string{"topic1", "topic2"}
tests := []struct {
name string
host string
topics []string
wantErr bool
}{
{"Valid input", validURL, topics, false},
{"Invalid URL", invalidURL, topics, true},
{"No topics", validURL, []string{}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
}
})
server := httptest.NewServer(mux)
defer server.Close()
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)
// Collect events
var events []*Event
for len(events) != 2 {
select {
case event := <-eventsChannel:
log.Info(event)
events = append(events, event)
}
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
}
}
}


@@ -563,9 +563,3 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
func (s *Service) inRegularSync() bool {
return s.cfg.SyncChecker.Synced()
}
// validating returns true if the beacon is tracking some validators that have
// registered for proposing.
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}


@@ -82,20 +82,19 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {


@@ -18,63 +18,17 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Try if we have already set the checkpoint cache
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil
}
if cachedState != nil && !cachedState.IsNil() {
return cachedState
}
st, err := s.HeadState(ctx)
if err != nil {
return nil
}
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
if err != nil {
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
}
return st
}
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
if st := s.getRecentPreState(ctx, c); st != nil {
return st, nil
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.


@@ -146,28 +146,6 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()


@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -559,20 +558,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -591,15 +576,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": root,
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
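The late-DA logging in this hunk uses a common pattern: arm a timer at the next-slot boundary that fires only if the check is still pending, and disarm it on return. A distilled, runnable sketch; waitForData is hypothetical:

package main

import (
	"fmt"
	"time"
)

func waitWithDeadlineWarning(deadline time.Time, waitForData func()) {
	if deadline.After(time.Now()) {
		warn := time.AfterFunc(time.Until(deadline), func() {
			fmt.Println("still waiting for DA check at slot end")
		})
		// Stop disarms the warning if we finish before the deadline.
		defer warn.Stop()
	}
	waitForData()
}

func main() {
	waitWithDeadlineWarning(time.Now().Add(100*time.Millisecond), func() {
		time.Sleep(50 * time.Millisecond) // finishes in time: no warning
	})
}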


@@ -95,9 +95,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {


@@ -109,6 +109,10 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
@@ -122,6 +126,10 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return
}
c.lock.Lock()
defer c.lock.Unlock()


@@ -2,7 +2,6 @@ package cache_test
import (
"context"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -36,28 +35,3 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
c.Disable()
require.NoError(t, c.MarkInProgress(r))
c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Get call will only terminate when
// it is not longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()
}


@@ -41,9 +41,3 @@ func (t *TrackedValidatorsCache) Prune() {
defer t.Unlock()
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
}
func (t *TrackedValidatorsCache) Validating() bool {
t.Lock()
defer t.Unlock()
return len(t.trackedValidators) > 0
}


@@ -14,7 +14,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",


@@ -24,7 +24,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",


@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -58,9 +57,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
defer span.End()
var err error
interop.WriteBlockToDisk(signed, false /* Has the block failed */)
interop.WriteStateToDisk(st)
parentRoot := signed.Block().ParentRoot()
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
if err != nil {
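This hunk matches the commit subject ("ssz interop write: only capture invalid block"): the unconditional WriteBlockToDisk/WriteStateToDisk at the top of the transition goes away. A hedged sketch of the resulting shape; processBlock and the local types stand in for the real call sites:

package main

import "fmt"

type signedBlock struct{ slot uint64 }

// writeBlockToDisk mimics interop.WriteBlockToDisk's failed-flag signature.
func writeBlockToDisk(b *signedBlock, failed bool) {
	fmt.Printf("captured block at slot %d (failed=%v)\n", b.slot, failed)
}

// processBlock is a hypothetical stand-in for the state transition.
func processBlock(b *signedBlock) error {
	if b.slot == 0 {
		return fmt.Errorf("invalid block")
	}
	return nil
}

func main() {
	for _, b := range []*signedBlock{{slot: 1}, {slot: 0}} {
		if err := processBlock(b); err != nil {
			// Capture only the invalid block, per the commit message,
			// instead of writing every block unconditionally.
			writeBlockToDisk(b, true /* failed */)
			continue
		}
	}
}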


@@ -201,19 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc
}
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
encs[lastIdx] = enc
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)


@@ -237,50 +237,6 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)
// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]
// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]
// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))
// make sure that we still correctly validate descendents in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)
// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -296,23 +252,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds


@@ -7,11 +7,9 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -129,16 +127,51 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
if err := configureTracing(cliCtx); err != nil {
return nil, err
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -158,10 +191,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
}
beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -170,8 +203,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
beacon.forkChoicer = doublylinkedtree.New()
depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
@@ -187,29 +220,112 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.BlobStorage = blobs
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
return nil, errors.Wrap(err, "backfill status initialization error")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
}
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
}
// db.DatabasePath is the path to the containing directory
@@ -227,170 +343,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return beacon, nil
}
func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}
if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}
if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}
if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}
return nil
}
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -479,86 +431,40 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return err
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
if err := d.RunMigrations(b.ctx); err != nil {
@@ -567,6 +473,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
b.db = d
var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
@@ -581,17 +488,16 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
return err
}
if b.CheckpointInitializer != nil {
@@ -600,11 +506,23 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
}
log.WithField("address", depositAddress).Info("Deposit contract")
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())
return nil
}
@@ -692,31 +610,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return errors.Wrapf(err, "could not register p2p service")
return err
}
svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -1058,13 +976,11 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)
selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
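Worth noting why net.JoinHostPort replaces fmt.Sprintf here: for an IPv6 host, the Sprintf form produces an address that cannot be parsed back, while JoinHostPort adds the required brackets. A minimal, self-contained illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	sprintfAddr := fmt.Sprintf("%s:%d", "::1", 4000) // "::1:4000" — ambiguous for IPv6
	joinedAddr := net.JoinHostPort("::1", "4000")    // "[::1]:4000" — parseable
	fmt.Println(sprintfAddr, joinedAddr)
}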
@@ -1158,9 +1074,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())

View File

@@ -11,7 +11,6 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",

View File

@@ -4,7 +4,6 @@ import (
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/urfave/cli/v2"
@@ -32,9 +31,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
err = errors.Errorf(
"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
cmd.DataDirFlag.Name,
log.Fatal(
"Could not determine your system's HOME path, please specify a --datadir you wish " +
"to use for your chain data",
)
}
}

View File

@@ -49,7 +49,6 @@ go_test(
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneAttsPool prunes attestations pool on every slot interval.
@@ -67,18 +66,7 @@ func (s *Service) pruneExpiredAtts() {
// Return true if the input slot has expired.
// Expired is defined as one epoch behind the current time.
func (s *Service) expired(providedSlot primitives.Slot) bool {
providedEpoch := slots.ToEpoch(providedSlot)
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch < params.BeaconConfig().DenebForkEpoch {
return s.expiredPreDeneb(providedSlot)
}
return providedEpoch+1 < currEpoch
}
// Handles expiration of attestations before deneb.
func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
func (s *Service) expired(slot primitives.Slot) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + uint64(expirationSlot.Mul(params.BeaconConfig().SecondsPerSlot))
currentTime := uint64(prysmTime.Now().Unix())
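For reference, a self-contained sketch of the two expiry rules this change separates, using mainnet-style constants (32 slots per epoch, 12 seconds per slot). The names and the final time comparison are assumptions for illustration; the diff truncates the original function body:

package main

import "fmt"

const (
	slotsPerEpoch  = 32 // mainnet SlotsPerEpoch
	secondsPerSlot = 12 // mainnet SecondsPerSlot
)

// expiredDeneb: the post-Deneb rule above; a slot is expired once its
// epoch is more than one epoch behind the current epoch.
func expiredDeneb(providedSlot, currSlot uint64) bool {
	return providedSlot/slotsPerEpoch+1 < currSlot/slotsPerEpoch
}

// expiredPreDeneb: the old, purely time-based rule; a slot expires one
// epoch's worth of seconds after its start (comparison assumed).
func expiredPreDeneb(slot, genesisTime, now uint64) bool {
	expirationSlot := slot + slotsPerEpoch
	expirationTime := genesisTime + expirationSlot*secondsPerSlot
	return now >= expirationTime
}

func main() {
	fmt.Println(expiredDeneb(64, 128))      // epoch 2 vs epoch 4 -> true
	fmt.Println(expiredDeneb(96, 128))      // epoch 3 vs epoch 4 -> false
	fmt.Println(expiredPreDeneb(0, 0, 385)) // slot 0 expires at t=384 -> true
}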

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -128,22 +127,3 @@ func TestPruneExpired_Expired(t *testing.T) {
assert.Equal(t, true, s.expired(0), "Should be expired")
assert.Equal(t, false, s.expired(1), "Should not be expired")
}
func TestPruneExpired_ExpiredDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = 3
params.OverrideBeaconConfig(cfg)
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)
// Rewind 4 epochs + 10 slots' worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - (4*uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) + 10)
secondEpochStart := primitives.Slot(2 * uint64(params.BeaconConfig().SlotsPerEpoch))
thirdEpochStart := primitives.Slot(3 * uint64(params.BeaconConfig().SlotsPerEpoch))
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}

View File

@@ -94,7 +94,6 @@ go_library(
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -69,7 +68,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastAttestation(ctx, subnet, att, forkDigest)
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
return nil
}
@@ -95,8 +94,8 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -138,10 +137,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).Warning("Attestation is too old to broadcast, discarding it")
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
return
}
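The internalBroadcast* rename makes the fire-and-forget shape of these methods explicit: the exported method returns immediately, and the goroutine re-parents its span onto context.Background so the caller's deadline cannot cancel an in-flight publish. A self-contained sketch of that pattern (illustrative names, real go.opencensus.io/trace calls):

package main

import (
	"context"
	"fmt"
	"time"

	"go.opencensus.io/trace"
)

// internalBroadcast starts a span from the caller's context, then
// re-parents onto context.Background so the caller's deadline no longer
// applies; only the trace span is carried over.
func internalBroadcast(ctx context.Context, payload string) {
	_, span := trace.StartSpan(ctx, "example.internalBroadcast")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline
	select {
	case <-time.After(50 * time.Millisecond): // simulate a slow publish
		fmt.Println("broadcast done:", payload)
	case <-ctx.Done(): // never fires after the detach above
		fmt.Println("cancelled:", ctx.Err())
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	go internalBroadcast(ctx, "attestation")
	time.Sleep(100 * time.Millisecond) // publish completes despite the 1ms caller timeout
}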
@@ -222,13 +218,13 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastBlob(ctx, subnet, blob, forkDigest)
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
return nil
}
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

View File

@@ -240,8 +240,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
var hosts []host.Host
// setup other nodes.
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {

View File

@@ -12,27 +12,28 @@ const defaultPubsubQueueSize = 600
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
BootstrapNodeAddr []string
Discv5BootStrapAddr []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
}
// validateConfig validates whether the values provided are accurate and will set

View File

@@ -25,7 +25,7 @@ const (
)
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
func (_ *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}
@@ -63,12 +63,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (*Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
func (_ *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
return true
}
// InterceptUpgraded tests whether a fully capable connection is allowed.
func (*Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
func (_ *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

View File

@@ -34,11 +34,6 @@ type Listener interface {
LocalNode() *enode.LocalNode
}
const (
udp4 = iota
udp6
)
// RefreshENR refreshes our node's ENR entry with the committee ids
// tracked for the given epoch, allowing our node to be dynamically
// discoverable by peers looking for those subnets.
@@ -67,14 +62,8 @@ func (s *Service) RefreshENR() {
// Compare current epoch with our fork epochs
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
switch {
case currEpoch < altairForkEpoch:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
default:
// Altair Behaviour
case currEpoch >= altairForkEpoch:
// Retrieve sync subnets from application level
// cache.
bitS := bitfield.Bitvector4{byte(0x00)}
@@ -93,6 +82,13 @@ func (s *Service) RefreshENR() {
return
}
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
default:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
}
// ping all peers to inform them of new metadata
s.pingPeers()
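For context, a self-contained sketch of what an ENR refresh amounts to: writing the tracked subnet bitvector into the local node record so discv5 peers can filter on it. It mirrors what the tests further down do with enr.WithEntry(attSubnetEnrKey, &bitV); the literal "attnets" key is the spec's name for that entry and an assumption here:

package main

import (
	"fmt"

	gcrypto "github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	db, err := enode.OpenDB("") // empty path = in-memory node DB
	if err != nil {
		panic(err)
	}
	defer db.Close()
	key, err := gcrypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	localNode := enode.NewLocalNode(db, key)

	// Mark subnet 1 as tracked and write the bitvector into the record.
	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(1, true)
	localNode.Set(enr.WithEntry("attnets", &bitV))

	fmt.Println(localNode.Node().String()) // enr:-... with attnets set
}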
@@ -144,9 +140,9 @@ func (s *Service) createListener(
// by default we will listen to all interfaces.
var bindIP net.IP
switch udpVersionFromIP(ipAddr) {
case udp4:
case "udp4":
bindIP = net.IPv4zero
case udp6:
case "udp6":
bindIP = net.IPv6zero
default:
return nil, errors.New("invalid ip provided")
@@ -164,7 +160,6 @@ func (s *Service) createListener(
IP: bindIP,
Port: int(s.cfg.UDPPort),
}
// Listen to all network interfaces
// for both ip protocols.
networkVersion := "udp"
@@ -182,27 +177,44 @@ func (s *Service) createListener(
if err != nil {
return nil, errors.Wrap(err, "could not create local node")
}
bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
for _, addr := range s.cfg.Discv5BootStrapAddrs {
if s.cfg.HostAddress != "" {
hostIP := net.ParseIP(s.cfg.HostAddress)
if hostIP.To4() == nil && hostIP.To16() == nil {
log.Errorf("Invalid host address given: %s", hostIP.String())
} else {
localNode.SetFallbackIP(hostIP)
localNode.SetStaticIP(hostIP)
}
}
if s.cfg.HostDNS != "" {
host := s.cfg.HostDNS
ips, err := net.LookupIP(host)
if err != nil {
return nil, errors.Wrap(err, "could not resolve host address")
}
if len(ips) > 0 {
// Use first IP returned from the
// resolver.
firstIP := ips[0]
localNode.SetFallbackIP(firstIP)
}
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
}
dv5Cfg.Bootnodes = []*enode.Node{}
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return nil, errors.Wrap(err, "could not bootstrap addr")
}
bootNodes = append(bootNodes, bootNode)
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
Bootnodes: bootNodes,
dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)
}
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
if err != nil {
return nil, errors.Wrap(err, "could not listen to discV5")
}
return listener, nil
}
@@ -230,35 +242,8 @@ func (s *Service) createLocalNode(
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
localNode = initializeSyncCommSubnets(localNode)
if s.cfg != nil && s.cfg.HostAddress != "" {
hostIP := net.ParseIP(s.cfg.HostAddress)
if hostIP.To4() == nil && hostIP.To16() == nil {
return nil, errors.Errorf("invalid host address: %s", s.cfg.HostAddress)
} else {
localNode.SetFallbackIP(hostIP)
localNode.SetStaticIP(hostIP)
}
}
if s.cfg != nil && s.cfg.HostDNS != "" {
host := s.cfg.HostDNS
ips, err := net.LookupIP(host)
if err != nil {
return nil, errors.Wrapf(err, "could not resolve host address: %s", host)
}
if len(ips) > 0 {
// Use first IP returned from the
// resolver.
firstIP := ips[0]
localNode.SetFallbackIP(firstIP)
}
}
return localNode, nil
return initializeSyncCommSubnets(localNode), nil
}
func (s *Service) startDiscoveryV5(
@@ -277,69 +262,58 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and TCP port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
// 5. Peer's fork digest in their ENR matches that of
// 1. The local node is still actively looking for peers to
// connect to.
// 2. Peer has a valid IP and TCP port set in their enr.
// 3. Peer hasn't been marked as 'bad'
// 4. Peer is not currently active or connected.
// 5. Peer is ready to receive incoming connections.
// 6. Peer's fork digest in their ENR matches that of
// our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nil node entries passed in.
if node == nil {
return false
}
// Ignore nodes with no IP address stored.
// ignore nodes with no ip address stored.
if node.IP() == nil {
return false
}
// Ignore nodes with their TCP ports not set.
// do not dial nodes with their tcp ports not set
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
return false
}
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) {
return false
}
// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
return false
}
// Ignore nodes that are already connected.
if s.host.Network().Connectedness(peerData.ID) == network.Connected {
return false
}
// Ignore nodes that are not ready to receive incoming connections.
if !s.peers.IsReadyToDial(peerData.ID) {
return false
}
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
// Decide whether or not to connect to a peer that does not
// match the proper fork ENR data of our local node.
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}
}
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
return true
}
@@ -389,7 +363,7 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
return allAddrs, nil
}
func ParseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
discv5Nodes, _ = parseGenericAddrs(addrs)
if len(discv5Nodes) == 0 {
log.Warn("No bootstrap addresses supplied")
@@ -509,9 +483,9 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}
func udpVersionFromIP(ipAddr net.IP) int {
func udpVersionFromIP(ipAddr net.IP) string {
if ipAddr.To4() != nil {
return udp4
return "udp4"
}
return udp6
return "udp6"
}

View File

@@ -42,6 +42,10 @@ import (
var discoveryWaitTime = 1 * time.Second
func init() {
rand.Seed(time.Now().Unix())
}
func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
ip, err := prysmNetwork.ExternalIPv4()
require.NoError(t, err, "Could not get ip")
@@ -99,8 +103,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
for i := 1; i <= 5; i++ {
port = 3000 + i
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
@@ -130,106 +134,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}
}
func TestCreateLocalNode(t *testing.T) {
testCases := []struct {
name string
cfg *Config
expectedError bool
}{
{
name: "valid config",
cfg: nil,
expectedError: false,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
expectedError: true,
},
{
name: "valid host address",
cfg: &Config{HostAddress: "192.168.0.1"},
expectedError: false,
},
{
name: "invalid host DNS",
cfg: &Config{HostDNS: "invalid"},
expectedError: true,
},
{
name: "valid host DNS",
cfg: &Config{HostDNS: "www.google.com"},
expectedError: false,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
)
// Create a private key.
address, privKey := createAddrAndPrivKey(t)
// Create a service.
service := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: tt.cfg,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
if tt.expectedError {
require.NotNil(t, err)
return
}
require.NoError(t, err)
expectedAddress := address
if tt.cfg != nil && tt.cfg.HostAddress != "" {
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
}
// Check IP.
// IP is not checked in the case of DNS, since it can resolve to different IPs.
if tt.cfg == nil || tt.cfg.HostDNS == "" {
ip := new(net.IP)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("ip", ip)))
require.Equal(t, true, ip.Equal(expectedAddress))
require.Equal(t, true, localNode.Node().IP().Equal(expectedAddress))
}
// Check UDP.
udp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("udp", udp)))
require.Equal(t, udpPort, localNode.Node().UDP())
// Check TCP.
tcp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("tcp", tcp)))
require.Equal(t, tcpPort, localNode.Node().TCP())
// Check fork is set.
fork := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
require.NotEmpty(t, *fork)
// Check att subnets.
attSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(attSubnetEnrKey, attSubnets)))
require.DeepSSZEqual(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, *attSubnets)
// Check sync committees subnets.
syncSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
})
}
}
func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
addr := net.ParseIP("invalidIP")
_, pkey := createAddrAndPrivKey(t)
@@ -406,12 +310,12 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
}
func TestCorrectUDPVersion(t *testing.T) {
assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, udp4, udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
// v4 in v6
assert.Equal(t, udp4, udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
}
// addPeer is a helper to add a peer with a given connection state.

View File

@@ -46,9 +46,9 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
}
var listeners []*discover.UDPv5
@@ -132,8 +132,8 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
var listeners []*discover.UDPv5

View File

@@ -20,7 +20,7 @@ self=%s
%d peers
%v
`,
s.cfg.Discv5BootStrapAddrs,
s.cfg.BootstrapNodeAddr,
s.selfAddresses(),
len(s.host.Network().Peers()),
formatPeers(s.host), // Must be last. Writes one entry per row.

View File

@@ -4,7 +4,6 @@ import (
"crypto/ecdsa"
"fmt"
"net"
"time"
"github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
@@ -12,7 +11,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
@@ -33,31 +31,29 @@ func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
}
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
cfg := s.cfg
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
if cfg.LocalIP != "" {
if net.ParseIP(cfg.LocalIP) == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
}
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
}
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
if err != nil {
return nil, errors.Wrap(err, "cannot convert private key to interface private key. (Private key not displayed in logs for security reasons)")
log.WithError(err).Fatal("Failed to retrieve private key")
}
id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
if err != nil {
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
log.WithError(err).Fatal("Failed to retrieve peer id")
}
log.Infof("Running node with peer id of %s ", id.String())
options := []libp2p.Option{
@@ -68,10 +64,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
libp2p.Transport(tcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
options = append(options, libp2p.Security(noise.ID, noise.New))
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}
@@ -103,11 +99,12 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
return addrs
}))
}
// Disable Ping Service.
options = append(options, libp2p.Ping(false))
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options, nil
return options
}
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
@@ -137,8 +134,3 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
return cfg.Apply(libp2p.Identity(ifaceKey))
}
}
// Configures stream timeouts on mplex.
func configureMplex() {
gomplex.ResetStreamTimeout = 5 * time.Second
}
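The recurring pattern in this file's change is returning wrapped errors from buildOptions instead of calling log.Fatal, which keeps shutdown decisions with the caller (NewService). A generic, self-contained sketch of that shape, not Prysm code:

package main

import (
	"fmt"
	"net"

	"github.com/pkg/errors"
)

// buildListener returns a wrapped error instead of log.Fatal-ing, so the
// caller keeps control of shutdown and logging.
func buildListener(ip string, port int) (net.Listener, error) {
	if net.ParseIP(ip) == nil {
		return nil, errors.Errorf("invalid local ip provided: %s", ip)
	}
	l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", ip, port))
	if err != nil {
		return nil, errors.Wrapf(err, "cannot listen on %s:%d", ip, port)
	}
	return l, nil
}

func main() {
	if _, err := buildListener("not-an-ip", 0); err != nil {
		fmt.Println("caller decides what to do:", err) // no process exit
	}
}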

View File

@@ -119,9 +119,7 @@ func TestDefaultMultiplexers(t *testing.T) {
svc.privKey, err = privKey(svc.cfg)
assert.NoError(t, err)
ipAddr := network.IPAddr()
opts, err := svc.buildOptions(ipAddr, svc.privKey)
assert.NoError(t, err)
opts := svc.buildOptions(ipAddr, svc.privKey)
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)

View File

@@ -107,4 +107,5 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))
}

View File

@@ -56,12 +56,12 @@ func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConf
func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
@@ -87,11 +87,11 @@ func (s *BadResponsesScorer) Params() *BadResponsesScorerConfig {
func (s *BadResponsesScorer) Count(pid peer.ID) (int, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.countNoLock(pid)
return s.count(pid)
}
// countNoLock is a lock-free version of Count.
func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
// count is a lock-free version of Count.
func (s *BadResponsesScorer) count(pid peer.ID) (int, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.BadResponses, nil
}
@@ -119,11 +119,11 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.BadResponses >= s.config.Threshold
}
@@ -137,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
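The systematic rename to *NoLock across these scorers documents a locking convention rather than changing behavior: exported methods take the store lock, and *NoLock variants assume the caller already holds it, so internal calls never re-acquire the RWMutex (re-acquiring a read lock while a writer is queued can deadlock). An illustrative sketch, not Prysm code:

package main

import (
	"fmt"
	"sync"
)

type scorer struct {
	mu    sync.RWMutex
	count map[string]int
}

// Score is the exported, locking entry point.
func (s *scorer) Score(id string) float64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.scoreNoLock(id)
}

// scoreNoLock assumes the caller holds the lock; it can call other
// *NoLock helpers without a second RLock.
func (s *scorer) scoreNoLock(id string) float64 {
	if s.isBadPeerNoLock(id) {
		return -100
	}
	return -2.5 * float64(s.count[id])
}

func (s *scorer) isBadPeerNoLock(id string) bool {
	return s.count[id] >= 4
}

func main() {
	s := &scorer{count: map[string]int{"peer1": 2}}
	fmt.Println(s.Score("peer1")) // -5
}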

View File

@@ -15,8 +15,6 @@ import (
)
func TestScorers_BadResponses_Score(t *testing.T) {
const pid = "peer1"
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -30,23 +28,15 @@ func TestScorers_BadResponses_Score(t *testing.T) {
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, true, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score for unregistered peer")
scorer.Increment("peer1")
assert.Equal(t, -2.5, scorer.Score("peer1"))
scorer.Increment("peer1")
assert.Equal(t, float64(-5), scorer.Score("peer1"))
scorer.Increment("peer1")
scorer.Increment("peer1")
assert.Equal(t, -100.0, scorer.Score("peer1"))
assert.Equal(t, true, scorer.IsBadPeer("peer1"))
}
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {

View File

@@ -98,11 +98,11 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
func (s *BlockProviderScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *BlockProviderScorer) scoreNoLock(pid peer.ID) float64 {
// score is a lock-free version of Score.
func (s *BlockProviderScorer) score(pid peer.ID) float64 {
score := float64(0)
peerData, ok := s.store.PeerData(pid)
// Boost score of new peers or peers that haven't been accessed for too long.
@@ -126,7 +126,7 @@ func (s *BlockProviderScorer) Params() *BlockProviderScorerConfig {
func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64) {
s.store.Lock()
defer s.store.Unlock()
defer s.touchNoLock(pid)
defer s.touch(pid)
if cnt <= 0 {
return
@@ -145,11 +145,11 @@ func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64)
func (s *BlockProviderScorer) Touch(pid peer.ID, t ...time.Time) {
s.store.Lock()
defer s.store.Unlock()
s.touchNoLock(pid, t...)
s.touch(pid, t...)
}
// touchNoLock is a lock-free version of Touch.
func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
// touch is a lock-free version of Touch.
func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
peerData := s.store.PeerDataGetOrCreate(pid)
if len(t) == 1 {
peerData.BlockProviderUpdated = t[0]
@@ -162,11 +162,11 @@ func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
func (s *BlockProviderScorer) ProcessedBlocks(pid peer.ID) uint64 {
s.store.RLock()
defer s.store.RUnlock()
return s.processedBlocksNoLock(pid)
return s.processedBlocks(pid)
}
// processedBlocksNoLock is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// processedBlocks is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.ProcessedBlocks
}
@@ -177,13 +177,13 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that a lower score for a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
func (_ *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
}
// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
func (*BlockProviderScorer) BadPeers() []peer.ID {
func (_ *BlockProviderScorer) BadPeers() []peer.ID {
return []peer.ID{}
}
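To make "relies on scores to probabilistically sort out low-scorers" concrete, here is an illustrative, self-contained sketch of score-weighted selection; it is not the actual WeightSorted implementation:

package main

import (
	"fmt"
	"math/rand"
)

// weightedPick selects one peer with probability proportional to its
// score, so low-scorers are rarely chosen without ever being banned.
func weightedPick(r *rand.Rand, peers []string, scores map[string]float64) string {
	total := 0.0
	for _, p := range peers {
		total += scores[p]
	}
	x := r.Float64() * total
	for _, p := range peers {
		x -= scores[p]
		if x <= 0 {
			return p
		}
	}
	return peers[len(peers)-1]
}

func main() {
	r := rand.New(rand.NewSource(1))
	peers := []string{"good", "weak"}
	scores := map[string]float64{"good": 0.9, "weak": 0.1}
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[weightedPick(r, peers, scores)]++
	}
	fmt.Println(counts) // "good" dominates; "weak" still gets picked occasionally
}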
@@ -277,9 +277,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
peers := make([]peer.ID, len(pids))
for i, pid := range pids {
if scoreFn != nil {
scores[pid] = scoreFn(pid, s.scoreNoLock(pid))
scores[pid] = scoreFn(pid, s.score(pid))
} else {
scores[pid] = s.scoreNoLock(pid)
scores[pid] = s.score(pid)
}
peers[i] = pid
}
@@ -293,9 +293,9 @@ func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
if !features.Get().EnablePeerScorer {
return "disabled"
}
score := s.scoreNoLock(pid)
score := s.score(pid)
return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
(score/s.MaxScore())*100, score, s.processedBlocksNoLock(pid), s.config.ProcessedBlocksCap)
(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
}
// MaxScore exposes maximum score attainable by peers.

View File

@@ -38,11 +38,11 @@ func newGossipScorer(store *peerdata.Store, config *GossipScorerConfig) *GossipS
func (s *GossipScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
// score is a lock-free version of Score.
func (s *GossipScorer) score(pid peer.ID) float64 {
peerData, ok := s.store.PeerData(pid)
if !ok {
return 0
@@ -54,11 +54,11 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
@@ -73,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
@@ -98,11 +98,11 @@ func (s *GossipScorer) SetGossipData(pid peer.ID, gScore float64,
func (s *GossipScorer) GossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.gossipDataNoLock(pid)
return s.gossipData(pid)
}
// gossipDataNoLock is a lock-free version of GossipData.
func (s *GossipScorer) gossipDataNoLock(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
// gossipData is a lock-free version of GossipData.
func (s *GossipScorer) gossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.GossipScore, peerData.BehaviourPenalty, peerData.TopicScores, nil
}

View File

@@ -41,12 +41,12 @@ func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig)
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
@@ -70,11 +70,11 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
@@ -100,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
@@ -129,11 +129,11 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatusNoLock(pid)
return s.peerStatus(pid)
}
// peerStatusNoLock is a lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
// peerStatus is a lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.ChainState == nil {
return nil, peerdata.ErrNoPeerStatus

View File

@@ -116,10 +116,10 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
if _, ok := s.store.PeerData(pid); !ok {
return 0
}
score += s.scorers.badResponsesScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
score += s.scorers.gossipScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.gossipScorer)
score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
score += s.scorers.gossipScorer.score(pid) * s.scorerWeight(s.scorers.gossipScorer)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
@@ -132,14 +132,14 @@ func (s *Service) IsBadPeer(pid peer.ID) bool {
// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
if s.scorers.badResponsesScorer.isBadPeer(pid) {
return true
}
if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
if features.Get().EnablePeerScorer {
if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
if s.scorers.gossipScorer.isBadPeer(pid) {
return true
}
}

View File

@@ -82,50 +82,44 @@ type Service struct {
// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
var err error
ctx, cancel := context.WithCancel(ctx)
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
cfg = validateConfig(cfg)
privKey, err := privKey(cfg)
if err != nil {
return nil, errors.Wrapf(err, "failed to generate p2p private key")
}
metaData, err := metaDataFromConfig(cfg)
if err != nil {
log.WithError(err).Error("Failed to create peer metadata")
return nil, err
}
addrFilter, err := configureFilter(cfg)
if err != nil {
log.WithError(err).Error("Failed to create address filter")
return nil, err
}
ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
s := &Service{
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
}
ipAddr := prysmnetwork.IPAddr()
s.cfg = validateConfig(s.cfg)
opts, err := s.buildOptions(ipAddr, s.privKey)
dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr)
cfg.Discv5BootStrapAddr = dv5Nodes
ipAddr := prysmnetwork.IPAddr()
s.privKey, err = privKey(s.cfg)
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
log.WithError(err).Error("Failed to generate p2p private key")
return nil, err
}
// Sets mplex timeouts
configureMplex()
s.metaData, err = metaDataFromConfig(s.cfg)
if err != nil {
log.WithError(err).Error("Failed to create peer metadata")
return nil, err
}
s.addrFilter, err = configureFilter(s.cfg)
if err != nil {
log.WithError(err).Error("Failed to create address filter")
return nil, err
}
s.ipLimiter = leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
opts := s.buildOptions(ipAddr, s.privKey)
h, err := libp2p.New(opts...)
if err != nil {
log.WithError(err).Error("Failed to create p2p host")
@@ -291,7 +285,7 @@ func (s *Service) Started() bool {
}
// Encoding returns the configured networking encoding.
func (*Service) Encoding() encoder.NetworkEncoding {
func (_ *Service) Encoding() encoder.NetworkEncoding {
return &encoder.SszNetworkEncoder{}
}
@@ -457,8 +451,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
}
func (s *Service) connectToBootnodes() error {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
for _, addr := range s.cfg.Discv5BootStrapAddrs {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddr))
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return err

View File

@@ -213,9 +213,10 @@ func TestListenForNewNodes(t *testing.T) {
// setup other nodes.
cs := startup.NewClockSynchronizer()
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
ClockWaiter: cs,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
ClockWaiter: cs,
}
for i := 1; i <= 5; i++ {
h, pkey, ipAddr := createHost(t, port+i)

View File

@@ -46,13 +46,9 @@ const syncLockerVal = 100
const blobSubnetLockerVal = 110
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
// - the required number of peers are found, or
// - the context is terminated.
// In some edge cases, this method may hang indefinitely even while peers
// are actually being found. In such a case, the user should cancel the
// context and re-run the method.
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required number of
// peers is found; the method only exits in the event of context timeouts.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
@@ -77,9 +73,9 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
return false, errors.New("no subnet exists for provided topic")
}
currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
currNum := len(s.pubsub.ListPeers(topic))
if currNum >= threshold {
break
}
@@ -103,6 +99,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
}
// Wait for all dials to be completed.
wg.Wait()
currNum = len(s.pubsub.ListPeers(topic))
}
return true, nil
}
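Given the hang caveat added to the doc comment above, a usage sketch (assumed caller-side code, mirroring the retry loop in the test further down): bound each search with a timeout and retry instead of passing a bare context:

package main

import (
	"context"
	"fmt"
	"time"
)

// subnetFinder abstracts the one method used from the p2p service.
type subnetFinder interface {
	FindPeersWithSubnet(ctx context.Context, topic string, index uint64, threshold int) (bool, error)
}

// findWithRetries bounds each search with a timeout and retries a few
// times, so a hung search cannot block the caller forever.
func findWithRetries(ctx context.Context, s subnetFinder, topic string, subnet uint64) (bool, error) {
	for attempt := 0; attempt < 3; attempt++ {
		searchCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
		found, err := s.FindPeersWithSubnet(searchCtx, topic, subnet, 1)
		cancel()
		if err != nil {
			return false, err
		}
		if found {
			return true, nil
		}
	}
	return false, nil
}

// stubFinder succeeds on the second attempt, standing in for discovery.
type stubFinder struct{ calls int }

func (f *stubFinder) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	f.calls++
	return f.calls >= 2, nil
}

func main() {
	found, err := findWithRetries(context.Background(), &stubFinder{}, "attnet-topic", 1)
	fmt.Println(found, err) // true <nil>
}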
@@ -113,13 +110,18 @@ func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bo
if !s.filterPeer(node) {
return false
}
subnets, err := attSubnets(node.Record())
if err != nil {
return false
}
return subnets[index]
indExists := false
for _, comIdx := range subnets {
if comIdx == index {
indExists = true
break
}
}
return indExists
}
}
@@ -203,10 +205,8 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
//
// return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
subnetsPerNode := params.BeaconConfig().SubnetsPerNode
subs := make([]uint64, 0, subnetsPerNode)
for i := uint64(0); i < subnetsPerNode; i++ {
subs := []uint64{}
for i := uint64(0); i < params.BeaconConfig().SubnetsPerNode; i++ {
sub, err := computeSubscribedSubnet(nodeID, epoch, i)
if err != nil {
return nil, err
@@ -281,20 +281,19 @@ func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {
// Reads the attestation subnets entry from a node's ENR and determines
// the committee indices of the attestation subnets the node is subscribed to.
func attSubnets(record *enr.Record) (map[uint64]bool, error) {
func attSubnets(record *enr.Record) ([]uint64, error) {
bitV, err := attBitvector(record)
if err != nil {
return nil, err
}
committeeIdxs := make(map[uint64]bool)
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(attestationSubnetCount)) {
return committeeIdxs, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}
var committeeIdxs []uint64
for i := uint64(0); i < attestationSubnetCount; i++ {
if bitV.BitAt(i) {
committeeIdxs[i] = true
committeeIdxs = append(committeeIdxs, i)
}
}
return committeeIdxs, nil
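The return-type change from []uint64 to map[uint64]bool lets the caller in filterPeerForAttSubnet test membership with a single map lookup instead of scanning a slice. A self-contained sketch of decoding the bitvector into a set:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Build an attnets bitvector with subnets 1 and 5 set.
	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(1, true)
	bitV.SetBitAt(5, true)

	// Decode into a set, as attSubnets now does; 64 is the attestation
	// subnet count.
	subnets := make(map[uint64]bool)
	for i := uint64(0); i < 64; i++ {
		if bitV.BitAt(i) {
			subnets[i] = true
		}
	}
	fmt.Println(subnets[5], subnets[6]) // true false
}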

View File

@@ -3,46 +3,49 @@ package p2p
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
// Topology of this test:
//
//
// Node 1 (subscribed to subnet 1) --\
// |
// Node 2 (subscribed to subnet 2) --+--> BootNode (not subscribed to any subnet) <------- Node 0 (not subscribed to any subnet)
// |
// Node 3 (subscribed to subnet 3) --/
//
// The purpose of this test is to ensure that "Node 0" (connected only to the boot node) is able to
// find and connect to a node already subscribed to a specific subnet.
// In our case, node i is subscribed to subnet i, for i = 1, 2, 3.
// Define the genesis validators root, to ensure everybody is on the same network.
const genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
t.Skip("This test is now failing after PR 7885 due to false positive")
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 4
flags.Init(gFlags)
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port)},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Create a context.
ctx := context.Background()
bootNode := bootListener.Self()
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
@@ -50,150 +53,113 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
pollingPeriod = currentPeriod
}()
// Create flags.
params.SetupTestConfigCleanup(t)
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
params.BeaconNetworkConfig().MinimumPeersInSubnetSearch = 1
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
// First, generate a bootstrap node.
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
bootNodeService := &Service{
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootNodeForkDigest, err := bootNodeService.currentForkDigest()
require.NoError(t, err)
bootListener, err := bootNodeService.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
bootNodeENR := bootListener.Self().String()
// Create 3 nodes, each subscribed to a different subnet.
// Each node is connected to the bootstrap node.
services := make([]*Service, 0, 3)
var listeners []*discover.UDPv5
for i := 1; i <= 3; i++ {
subnet := uint64(i)
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: uint(2000 + i),
UDPPort: uint(3000 + i),
})
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err)
require.Equal(t, true, nodeForkDigest == bootNodeForkDigest, "fork digest of the node doesn't match the boot node")
// Start the service.
service.Start()
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
port = 3000 + i
cfg := &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(subnet, true)
bitV.SetBitAt(uint64(i), true)
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
service.dv5Listener.LocalNode().Set(entry)
// Join and subscribe to the subnet, needed by libp2p.
topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
require.NoError(t, err)
_, err = topic.Subscribe()
require.NoError(t, err)
// Memoize the service.
services = append(services, service)
listener.LocalNode().Set(entry)
listeners = append(listeners, listener)
}
// Stop the services.
defer func() {
for _, service := range services {
err := service.Stop()
require.NoError(t, err)
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Make one service on port 4001.
port = 4001
gs := startup.NewClockSynchronizer()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: 2010,
UDPPort: 3010,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
UDPPort: uint(port),
ClockWaiter: gs,
}
service, err := NewService(ctx, cfg)
s, err = NewService(context.Background(), cfg)
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.Start()
defer func() {
err := service.Stop()
require.NoError(t, err)
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
}()
time.Sleep(50 * time.Millisecond)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
// Look up 3 different subnets.
exists := make([]bool, 0, 3)
for i := 1; i <= 3; i++ {
subnet := uint64(i)
topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
exist := false
// This for loop is used to ensure we don't get stuck in `FindPeersWithSubnet`.
// Read the documentation of `FindPeersWithSubnet` for more details.
for j := 0; j < 3; j++ {
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
exist, err = service.FindPeersWithSubnet(ctxWithTimeOut, topic, subnet, 1)
require.NoError(t, err)
if exist {
break
}
}
require.NoError(t, err)
exists = append(exists, exist)
// Wait for the nodes' local routing tables to be populated with the other nodes.
time.Sleep(6 * discoveryWaitTime)
// look up 3 different subnets
ctx := context.Background()
exists, err := s.FindPeersWithSubnet(ctx, "", 1, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists2, err := s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists3, err := s.FindPeersWithSubnet(ctx, "", 3, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
if !exists || !exists2 || !exists3 {
t.Fatal("Peer with subnet doesn't exist")
}
// Check if all peers are found.
for _, exist := range exists {
require.Equal(t, true, exist, "Peer with subnet doesn't exist")
// Update ENR of a peer.
testService := &Service{
dv5Listener: listeners[0],
metaData: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
Attnets: bitfield.NewBitvector64(),
}),
}
cache.SubnetIDs.AddAttesterSubnetID(0, 10)
testService.RefreshENR()
time.Sleep(2 * time.Second)
exists, err = s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
assert.Equal(t, true, exists, "Peer with subnet doesn't exist")
assert.NoError(t, s.Stop())
exitRoutine <- true
}
func Test_AttSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(localNode *enode.LocalNode) *enr.Record
record func(t *testing.T) *enr.Record
want []uint64
wantErr bool
errContains string
}{
{
name: "valid record",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
localNode = initializeAttSubnets(localNode)
return localNode.Node().Record()
},
@@ -202,7 +168,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too small subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
localNode.Set(entry)
return localNode.Node().Record()
@@ -213,7 +186,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "half sized subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
localNode.Set(entry)
return localNode.Node().Record()
@@ -224,7 +204,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too large subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+1))
localNode.Set(entry)
return localNode.Node().Record()
@@ -235,7 +222,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "very large subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+100))
localNode.Set(entry)
return localNode.Node().Record()
@@ -246,7 +240,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "single subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(0, true)
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
@@ -258,10 +259,17 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "multiple subnets",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
// Keep only odd subnets.
// skip 2 subnets
if (i+1)%2 == 0 {
continue
}
@@ -279,7 +287,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "all subnets",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
bitV.SetBitAt(i, true)
@@ -296,35 +311,16 @@ func Test_AttSubnets(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record := tt.record(localNode)
got, err := attSubnets(record)
got, err := attSubnets(tt.record(t))
if (err != nil) != tt.wantErr {
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
assert.ErrorContains(t, tt.errContains, err)
}
want := make(map[uint64]bool, len(tt.want))
for _, subnet := range tt.want {
want[subnet] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("attSubnets() got = %v, want %v", got, want)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("attSubnets() got = %v, want %v", got, tt.want)
}
})
}
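
All of the table cases above build the same scaffolding: an in-memory enode DB, a secp256k1 identity key, and an ENR entry holding a subnet bitvector. A minimal sketch of that round trip, assuming the spec's "attnets" ENR key (the diff refers to it via attSubnetEnrKey) and a throwaway go-ethereum key in place of the converted libp2p key:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// In-memory node DB and a fresh secp256k1 identity key.
	db, err := enode.OpenDB("")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	localNode := enode.NewLocalNode(db, key)

	// Advertise subscription to subnets 1 and 3 in a 64-bit attnets bitvector.
	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(1, true)
	bitV.SetBitAt(3, true)
	localNode.Set(enr.WithEntry("attnets", bitV.Bytes()))

	// Read the entry back out of the signed record.
	var raw []byte
	if err := localNode.Node().Record().Load(enr.WithEntry("attnets", &raw)); err != nil {
		panic(err)
	}
	fmt.Printf("attnets entry: %x\n", raw)
}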

View File

@@ -54,45 +54,38 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return privKeyFromFile(cfg.PrivateKey)
}
// Default keys have the next highest precedence, if they exist.
_, err := os.Stat(defaultKeyPath)
defaultKeysExist := !os.IsNotExist(err)
if err != nil && defaultKeysExist {
return nil, err
}
// Default keys have the next highest precedence, if they exist.
if defaultKeysExist {
return privKeyFromFile(defaultKeyPath)
}
// There are no keys on the filesystem, so we need to generate one.
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
if err != nil {
return nil, err
}
// If the StaticPeerID flag is not set, return the private key.
if !cfg.StaticPeerID {
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
// If the StaticPeerID flag is set, save the generated key as the default
// key, so that it will be used by default on the next node start.
if cfg.StaticPeerID {
rawbytes, err := priv.Raw()
if err != nil {
return nil, err
}
dst := make([]byte, hex.EncodedLen(len(rawbytes)))
hex.Encode(dst, rawbytes)
if err := file.WriteFile(defaultKeyPath, dst); err != nil {
return nil, err
}
log.Infof("Wrote network key to file")
// Read the key from the defaultKeyPath file just written
// for the strongest guarantee that the next start will be the same as this one.
return privKeyFromFile(defaultKeyPath)
}
// Save the generated key as the default key, so that it will be used by
// default on the next node start.
rawbytes, err := priv.Raw()
if err != nil {
return nil, err
}
dst := make([]byte, hex.EncodedLen(len(rawbytes)))
hex.Encode(dst, rawbytes)
if err := file.WriteFile(defaultKeyPath, dst); err != nil {
return nil, err
}
log.Info("Wrote network key to file")
// Read the key from the defaultKeyPath file just written
// for the strongest guarantee that the next start will be the same as this one.
return privKeyFromFile(defaultKeyPath)
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
}
// Retrieves a p2p networking private key from a file path.
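
The write path above hex-encodes the raw key bytes before persisting, then re-reads the file so the in-memory key is guaranteed to match what the next start will load. A stdlib-only sketch of that round trip (a throwaway P-256 key stands in for the secp256k1 key the real code generates):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Generate a throwaway ECDSA key; the real code uses secp256k1 via libp2p.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	raw := key.D.Bytes()

	// Persist the raw scalar hex-encoded, mirroring the write path above.
	path := filepath.Join(os.TempDir(), "network-keypair-demo")
	if err := os.WriteFile(path, []byte(hex.EncodeToString(raw)), 0o600); err != nil {
		panic(err)
	}

	// Read it back, as privKeyFromFile would, so the next start reuses the key.
	enc, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	dec, err := hex.DecodeString(string(enc))
	if err != nil {
		panic(err)
	}
	fmt.Println("round-tripped bytes equal:", hex.EncodeToString(dec) == hex.EncodeToString(raw))
}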

View File

@@ -25,7 +25,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -8,7 +8,6 @@ import (
time2 "time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
@@ -24,6 +23,7 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -124,93 +124,86 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
// stalling while waiting for the first response chunk.
// After that we send a keepalive dummy message every SECONDS_PER_SLOT
// to prevent anyone (e.g. proxy servers) from closing connections.
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
sendKeepalive(w, flusher)
keepaliveTicker := time2.NewTicker(time2.Duration(params.BeaconConfig().SecondsPerSlot) * time2.Second)
for {
select {
case event := <-opsChan:
if err := handleBlockOperationEvents(w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
handleBlockOperationEvents(w, flusher, topicsMap, event)
case event := <-stateChan:
if err := s.handleStateEvents(ctx, w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
s.handleStateEvents(ctx, w, flusher, topicsMap, event)
case <-keepaliveTicker.C:
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
sendKeepalive(w, flusher)
case <-ctx.Done():
return
}
}
}
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
switch event.Type {
case operation.AggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return nil
return
}
attData, ok := event.Data.(*operation.AggregatedAttReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
}
att := structs.AttFromConsensus(attData.Attestation.Aggregate)
return send(w, flusher, AttestationTopic, att)
send(w, flusher, AttestationTopic, att)
case operation.UnaggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return nil
return
}
attData, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
}
att := structs.AttFromConsensus(attData.Attestation)
return send(w, flusher, AttestationTopic, att)
send(w, flusher, AttestationTopic, att)
case operation.ExitReceived:
if _, ok := requestedTopics[VoluntaryExitTopic]; !ok {
return nil
return
}
exitData, ok := event.Data.(*operation.ExitReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
return
}
exit := structs.SignedExitFromConsensus(exitData.Exit)
return send(w, flusher, VoluntaryExitTopic, exit)
send(w, flusher, VoluntaryExitTopic, exit)
case operation.SyncCommitteeContributionReceived:
if _, ok := requestedTopics[SyncCommitteeContributionTopic]; !ok {
return nil
return
}
contributionData, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
return
}
contribution := structs.SignedContributionAndProofFromConsensus(contributionData.Contribution)
return send(w, flusher, SyncCommitteeContributionTopic, contribution)
send(w, flusher, SyncCommitteeContributionTopic, contribution)
case operation.BLSToExecutionChangeReceived:
if _, ok := requestedTopics[BLSToExecutionChangeTopic]; !ok {
return nil
return
}
changeData, ok := event.Data.(*operation.BLSToExecutionChangeReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
return
}
return send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
case operation.BlobSidecarReceived:
if _, ok := requestedTopics[BlobSidecarTopic]; !ok {
return nil
return
}
blobData, ok := event.Data.(*operation.BlobSidecarReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
return
}
versionedHash := blockchain.ConvertKzgCommitmentToVersionedHash(blobData.Blob.KzgCommitment)
blobEvent := &structs.BlobSidecarEvent{
@@ -220,36 +213,38 @@ func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, req
VersionedHash: versionedHash.String(),
KzgCommitment: hexutil.Encode(blobData.Blob.KzgCommitment),
}
return send(w, flusher, BlobSidecarTopic, blobEvent)
send(w, flusher, BlobSidecarTopic, blobEvent)
case operation.AttesterSlashingReceived:
if _, ok := requestedTopics[AttesterSlashingTopic]; !ok {
return nil
return
}
attesterSlashingData, ok := event.Data.(*operation.AttesterSlashingReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
return
}
return send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
case operation.ProposerSlashingReceived:
if _, ok := requestedTopics[ProposerSlashingTopic]; !ok {
return nil
return
}
proposerSlashingData, ok := event.Data.(*operation.ProposerSlashingReceivedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
return
}
return send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
}
return nil
}
func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
switch event.Type {
case statefeed.NewHead:
if _, ok := requestedTopics[HeadTopic]; ok {
headData, ok := event.Data.(*ethpb.EventHead)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
return
}
head := &structs.HeadEvent{
Slot: fmt.Sprintf("%d", headData.Slot),
@@ -260,22 +255,23 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
PreviousDutyDependentRoot: hexutil.Encode(headData.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(headData.CurrentDutyDependentRoot),
}
return send(w, flusher, HeadTopic, head)
send(w, flusher, HeadTopic, head)
}
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
return s.sendPayloadAttributes(ctx, w, flusher)
s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.MissedSlot:
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
return s.sendPayloadAttributes(ctx, w, flusher)
s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.FinalizedCheckpoint:
if _, ok := requestedTopics[FinalizedCheckpointTopic]; !ok {
return nil
return
}
checkpointData, ok := event.Data.(*ethpb.EventFinalizedCheckpoint)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
return
}
checkpoint := &structs.FinalizedCheckpointEvent{
Block: hexutil.Encode(checkpointData.Block),
@@ -283,14 +279,15 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", checkpointData.Epoch),
ExecutionOptimistic: checkpointData.ExecutionOptimistic,
}
return send(w, flusher, FinalizedCheckpointTopic, checkpoint)
send(w, flusher, FinalizedCheckpointTopic, checkpoint)
case statefeed.LightClientFinalityUpdate:
if _, ok := requestedTopics[LightClientFinalityUpdateTopic]; !ok {
return nil
return
}
updateData, ok := event.Data.(*ethpbv2.LightClientFinalityUpdateWithVersion)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
return
}
var finalityBranch []string
@@ -321,14 +318,15 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
return send(w, flusher, LightClientFinalityUpdateTopic, update)
send(w, flusher, LightClientFinalityUpdateTopic, update)
case statefeed.LightClientOptimisticUpdate:
if _, ok := requestedTopics[LightClientOptimisticUpdateTopic]; !ok {
return nil
return
}
updateData, ok := event.Data.(*ethpbv2.LightClientOptimisticUpdateWithVersion)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
return
}
update := &structs.LightClientOptimisticUpdateEvent{
Version: version.String(int(updateData.Version)),
@@ -347,14 +345,15 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
return send(w, flusher, LightClientOptimisticUpdateTopic, update)
send(w, flusher, LightClientOptimisticUpdateTopic, update)
case statefeed.Reorg:
if _, ok := requestedTopics[ChainReorgTopic]; !ok {
return nil
return
}
reorgData, ok := event.Data.(*ethpb.EventChainReorg)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
return
}
reorg := &structs.ChainReorgEvent{
Slot: fmt.Sprintf("%d", reorgData.Slot),
@@ -366,69 +365,78 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", reorgData.Epoch),
ExecutionOptimistic: reorgData.ExecutionOptimistic,
}
return send(w, flusher, ChainReorgTopic, reorg)
send(w, flusher, ChainReorgTopic, reorg)
case statefeed.BlockProcessed:
if _, ok := requestedTopics[BlockTopic]; !ok {
return nil
return
}
blkData, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
return write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
return
}
blockRoot, err := blkData.SignedBlock.Block().HashTreeRoot()
if err != nil {
return write(w, flusher, "Could not get block root: "+err.Error())
write(w, flusher, "Could not get block root: "+err.Error())
return
}
blk := &structs.BlockEvent{
Slot: fmt.Sprintf("%d", blkData.Slot),
Block: hexutil.Encode(blockRoot[:]),
ExecutionOptimistic: blkData.Optimistic,
}
return send(w, flusher, BlockTopic, blk)
send(w, flusher, BlockTopic, blk)
}
return nil
}
// This event stream is intended to be used by builders and relays.
// Parent fields are based on the state at N_{current_slot}, while the remaining fields are based on the state at N_{current_slot + 1}.
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) error {
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return write(w, flusher, "Could not get head root: "+err.Error())
write(w, flusher, "Could not get head root: "+err.Error())
return
}
st, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return write(w, flusher, "Could not get head state: "+err.Error())
write(w, flusher, "Could not get head state: "+err.Error())
return
}
// advance the head state
headState, err := transition.ProcessSlotsIfPossible(ctx, st, s.ChainInfoFetcher.CurrentSlot()+1)
if err != nil {
return write(w, flusher, "Could not advance head state: "+err.Error())
write(w, flusher, "Could not advance head state: "+err.Error())
return
}
headBlock, err := s.HeadFetcher.HeadBlock(ctx)
if err != nil {
return write(w, flusher, "Could not get head block: "+err.Error())
write(w, flusher, "Could not get head block: "+err.Error())
return
}
headPayload, err := headBlock.Block().Body().Execution()
if err != nil {
return write(w, flusher, "Could not get execution payload: "+err.Error())
write(w, flusher, "Could not get execution payload: "+err.Error())
return
}
t, err := slots.ToTime(headState.GenesisTime(), headState.Slot())
if err != nil {
return write(w, flusher, "Could not get head state slot time: "+err.Error())
write(w, flusher, "Could not get head state slot time: "+err.Error())
return
}
prevRando, err := helpers.RandaoMix(headState, time.CurrentEpoch(headState))
if err != nil {
return write(w, flusher, "Could not get head state randao mix: "+err.Error())
write(w, flusher, "Could not get head state randao mix: "+err.Error())
return
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, headState)
if err != nil {
return write(w, flusher, "Could not get head state proposer index: "+err.Error())
write(w, flusher, "Could not get head state proposer index: "+err.Error())
return
}
var attributes interface{}
@@ -442,7 +450,8 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Capella:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
}
attributes = &structs.PayloadAttributesV2{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -453,11 +462,13 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Deneb:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
}
parentRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return write(w, flusher, "Could not get head block root: "+err.Error())
write(w, flusher, "Could not get head block root: "+err.Error())
return
}
attributes = &structs.PayloadAttributesV3{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -467,12 +478,14 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
ParentBeaconBlockRoot: hexutil.Encode(parentRoot[:]),
}
default:
return write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
return
}
attributesBytes, err := json.Marshal(attributes)
if err != nil {
return write(w, flusher, err.Error())
write(w, flusher, err.Error())
return
}
eventData := structs.PayloadAttributesEventData{
ProposerIndex: fmt.Sprintf("%d", proposerIndex),
@@ -484,31 +497,32 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
}
eventDataBytes, err := json.Marshal(eventData)
if err != nil {
return write(w, flusher, err.Error())
write(w, flusher, err.Error())
return
}
return send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
Version: version.String(headState.Version()),
Data: eventDataBytes,
})
}
func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) error {
func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) {
j, err := json.Marshal(data)
if err != nil {
return write(w, flusher, "Could not marshal event to JSON: "+err.Error())
write(w, flusher, "Could not marshal event to JSON: "+err.Error())
return
}
return write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
}
func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) error {
return write(w, flusher, ":\n\n")
func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) {
write(w, flusher, ":\n\n")
}
func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) error {
func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) {
_, err := fmt.Fprintf(w, format, a...)
if err != nil {
return errors.Wrap(err, "could not write to response writer")
log.WithError(err).Error("Could not write to response writer")
}
flusher.Flush()
return nil
}
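
The write and send helpers above implement Server-Sent Events framing: an event: line, a data: line, a blank line, and a flush after every write so proxies and clients see each chunk immediately. A minimal stdlib sketch of the same framing (the /events path and payload are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func sse(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		select {
		case <-r.Context().Done():
			return // client went away
		case <-ticker.C:
			payload, _ := json.Marshal(map[string]int{"seq": i})
			// The wire format: an event name line, a data line, then a blank line.
			fmt.Fprintf(w, "event: %s\ndata: %s\n\n", "demo", payload)
			flusher.Flush() // push the chunk out immediately
		}
	}
}

func main() {
	http.HandleFunc("/events", sse)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}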

View File

@@ -18,10 +18,8 @@ go_library(
"@com_github_golang_protobuf//ptypes/timestamp",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
],
@@ -47,9 +45,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//reflection:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",

View File

@@ -6,9 +6,7 @@ package node
import (
"context"
"fmt"
"net/http"
"sort"
"strconv"
"time"
"github.com/golang/protobuf/ptypes/empty"
@@ -23,10 +21,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/logs"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"go.opencensus.io/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
)
@@ -49,35 +45,6 @@ type Server struct {
BeaconMonitoringPort int
}
// GetHealth checks the health of the node
func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "node.GetHealth")
defer span.End()
// Set a timeout for the health check operation
timeoutDuration := 10 * time.Second
ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
defer cancel() // Important to avoid a context leak
if ns.SyncChecker.Synced() {
return &empty.Empty{}, nil
}
if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
if request.SyncingStatus != 0 {
// override the 200 success with the provided request status
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}
return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
}
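
The removed handler relies on the grpc-gateway convention of overriding the HTTP status of a successful gRPC response via an x-http-code header. A small sketch of just that mechanism, assuming it is called from inside a running gRPC handler (setHTTPCode is a hypothetical helper):

package health

import (
	"context"
	"strconv"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// setHTTPCode must be called from inside a gRPC handler; grpc-gateway reads
// the x-http-code header and replaces the default 200 with the given status.
func setHTTPCode(ctx context.Context, code uint64) error {
	md := metadata.Pairs("x-http-code", strconv.FormatUint(code, 10))
	if err := grpc.SetHeader(ctx, md); err != nil {
		return status.Errorf(codes.Internal, "could not set custom success code header: %v", err)
	}
	return nil
}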
// GetSyncStatus checks the current network sync status of the node.
func (ns *Server) GetSyncStatus(_ context.Context, _ *empty.Empty) (*ethpb.SyncStatus, error) {
return &ethpb.SyncStatus{

View File

@@ -3,14 +3,12 @@ package node
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
@@ -24,7 +22,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/reflection"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@@ -173,53 +170,3 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
assert.Equal(t, ep, res.CurrentAddress)
assert.Equal(t, errStr, res.CurrentConnectionError)
}
func TestNodeServer_GetHealth(t *testing.T) {
tests := []struct {
name string
input *mockSync.Sync
customStatus uint64
wantedErr string
}{
{
name: "happy path",
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
},
{
name: "syncing",
input: &mockSync.Sync{IsSyncing: false},
wantedErr: "service unavailable",
},
{
name: "custom sync status",
input: &mockSync.Sync{IsSyncing: true},
customStatus: 206,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
server := grpc.NewServer()
ns := &Server{
SyncChecker: tt.input,
}
ethpb.RegisterNodeServer(server, ns)
reflection.Register(server)
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
if tt.wantedErr == "" {
require.NoError(t, err)
return
}
if tt.customStatus != 0 {
// Assuming the call was successful, now extract the headers
headers, _ := metadata.FromIncomingContext(ctx)
// Check for the specific header
values, ok := headers["x-http-code"]
require.Equal(t, true, ok && len(values) > 0)
require.Equal(t, fmt.Sprintf("%d", tt.customStatus), values[0])
}
require.ErrorContains(t, tt.wantedErr, err)
})
}
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -40,12 +39,6 @@ var (
})
)
func setFeeRecipientIfBurnAddress(val *cache.TrackedValidator) {
if val.FeeRecipient == primitives.ExecutionAddress([20]byte{}) && val.Index == 0 {
val.FeeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
}
}
// This returns the local execution payload of a given slot. The function has full awareness of pre and post merge.
func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) (interfaces.ExecutionData, bool, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getLocalPayload")
@@ -69,7 +62,6 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
if !tracked {
logrus.WithFields(logFields).Warn("could not find tracked proposer index")
}
setFeeRecipientIfBurnAddress(&val)
var err error
if ok && payloadId != [8]byte{} {

View File

@@ -383,16 +383,3 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
})
}
}
func TestSetFeeRecipientIfBurnAddress(t *testing.T) {
val := &cache.TrackedValidator{Index: 1}
cfg := params.BeaconConfig().Copy()
cfg.DefaultFeeRecipient = common.Address([20]byte{'a'})
params.OverrideBeaconConfig(cfg)
require.NotEqual(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
setFeeRecipientIfBurnAddress(val)
require.NotEqual(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
val.Index = 0
setFeeRecipientIfBurnAddress(val)
require.Equal(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
}

View File

@@ -2697,13 +2697,10 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) {
BeaconDB: db,
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}
require.Equal(t, false, proposerServer.TrackedValidatorsCache.Validating())
_, err := proposerServer.PrepareBeaconProposer(ctx, tt.args.request)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
} else {
require.Equal(t, true, proposerServer.TrackedValidatorsCache.Validating())
}
require.NoError(t, err)
val, tracked := proposerServer.TrackedValidatorsCache.Validator(1)

View File

@@ -108,7 +108,6 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//io/file:go_default_library",
"//monitoring/tracing:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -79,10 +79,6 @@ func (c *batchSequencer) update(b batch) {
// so we want to copy c to a, then on i=3, d to b, then on i=4 e to c.
c.seq[i-done] = c.seq[i]
}
if done == 1 && len(c.seq) == 1 {
c.seq[0] = c.batcher.beforeBatch(c.seq[0])
return
}
// Overwrite the moved batches with the next ones in the sequence.
// Continuing the example in the comment above, len(c.seq)==5, done=2, so i=3.
// We want to replace index 3 with the batch that should be processed after index 2,

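The loop above slides completed batches out of a fixed-length window by copying the still-pending entries down and refilling the tail. A toy sketch of the same copy-down idiom on a plain slice (values are illustrative):

package main

import "fmt"

func main() {
	// A fixed window of work items; the first two are finished.
	seq := []string{"a:done", "b:done", "c", "d", "e"}
	done := 2

	// Copy the still-pending items down over the finished ones, then refill
	// the tail with the next items in the overall sequence.
	for i := done; i < len(seq); i++ {
		seq[i-done] = seq[i]
	}
	next := []string{"f", "g"}
	copy(seq[len(seq)-done:], next)
	fmt.Println(seq) // [c d e f g]
}
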
View File

@@ -64,35 +64,6 @@ func TestBatcherBefore(t *testing.T) {
}
}
func TestBatchSingleItem(t *testing.T) {
var min, max, size primitives.Slot
// seqLen = 1 means just one worker
seqLen := 1
min = 0
max = 11235
size = 64
seq := newBatchSequencer(seqLen, min, max, size)
got, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(got))
b := got[0]
// calling sequence again should give you the next (earlier) batch
seq.update(b.withState(batchImportComplete))
next, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(next))
require.Equal(t, b.end, next[0].end+size)
// should get the same batch again when update is called with an error
seq.update(next[0].withState(batchErrRetryable))
same, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(same))
require.Equal(t, next[0].begin, same[0].begin)
require.Equal(t, next[0].end, same[0].end)
}
func TestBatchSequencer(t *testing.T) {
var min, max, size primitives.Slot
seqLen := 8

View File

@@ -118,24 +118,6 @@ func WithVerifierWaiter(viw InitializerWaiter) ServiceOption {
}
}
// WithMinimumSlot allows the user to specify a different backfill minimum slot than the spec default of current - MIN_EPOCHS_FOR_BLOCK_REQUESTS.
// If this value is greater than current - MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored with a warning log.
func WithMinimumSlot(s primitives.Slot) ServiceOption {
ms := func(current primitives.Slot) primitives.Slot {
specMin := minimumBackfillSlot(current)
if s < specMin {
return s
}
log.WithField("userSlot", s).WithField("specMinSlot", specMin).
Warn("Ignoring user-specified slot > MIN_EPOCHS_FOR_BLOCK_REQUESTS.")
return specMin
}
return func(s *Service) error {
s.ms = ms
return nil
}
}
// NewService initializes the backfill Service. Like all implementations of the Service interface,
// the service won't begin its runloop until Start() is called.
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
@@ -307,7 +289,7 @@ func (s *Service) Start() {
if ctx.Err() != nil {
return
}
if s.updateComplete() {
if allComplete := s.updateComplete(); allComplete {
return
}
s.importBatches(ctx)
@@ -334,11 +316,11 @@ func (s *Service) downscore(b batch) {
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(b.blockPid)
}
func (*Service) Stop() error {
func (s *Service) Stop() error {
return nil
}
func (*Service) Status() error {
func (s *Service) Status() error {
return nil
}
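
WithMinimumSlot follows the functional-options pattern: the option captures the user value and installs a closure that defers the clamping decision until the current slot is known. A generic sketch of the pattern (WithFloor and the current/2 minimum are hypothetical, not the real spec rule):

package main

import "fmt"

type Service struct {
	minSlot func(current uint64) uint64
}

type Option func(*Service) error

// WithFloor clamps a user-supplied floor against a protocol minimum,
// mirroring how WithMinimumSlot defers the comparison until runtime.
func WithFloor(user uint64) Option {
	return func(s *Service) error {
		s.minSlot = func(current uint64) uint64 {
			specMin := current / 2 // hypothetical protocol minimum
			if user < specMin {
				return user // user wants more history than required: honor it
			}
			return specMin // otherwise fall back to the protocol minimum
		}
		return nil
	}
}

func New(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := New(WithFloor(10))
	fmt.Println(s.minSlot(100)) // 10
}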

View File

@@ -29,7 +29,7 @@ func (m mockMinimumSlotter) minimumSlot(_ primitives.Slot) primitives.Slot {
type mockInitalizerWaiter struct {
}
func (*mockInitalizerWaiter) WaitForInitializer(_ context.Context) (*verification.Initializer, error) {
func (mi *mockInitalizerWaiter) WaitForInitializer(ctx context.Context) (*verification.Initializer, error) {
return &verification.Initializer{}, nil
}
@@ -67,7 +67,7 @@ func TestServiceInit(t *testing.T) {
}
go srv.Start()
todo := make([]batch, 0)
todo = testReadN(ctx, t, pool.todoChan, nWorkers, todo)
todo = testReadN(t, ctx, pool.todoChan, nWorkers, todo)
require.Equal(t, nWorkers, len(todo))
for i := 0; i < remaining; i++ {
b := todo[i]
@@ -75,7 +75,7 @@ func TestServiceInit(t *testing.T) {
b.state = batchImportable
}
pool.finishedChan <- b
todo = testReadN(ctx, t, pool.todoChan, 1, todo)
todo = testReadN(t, ctx, pool.todoChan, 1, todo)
}
require.Equal(t, remaining+nWorkers, len(todo))
for i := remaining; i < remaining+nWorkers; i++ {
@@ -90,12 +90,14 @@ func TestMinimumBackfillSlot(t *testing.T) {
minSlot := minimumBackfillSlot(primitives.Slot(currSlot))
require.Equal(t, 100*params.BeaconConfig().SlotsPerEpoch, minSlot)
oe = helpers.MinEpochsForBlockRequests()
currSlot = oe.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
minSlot = minimumBackfillSlot(primitives.Slot(currSlot))
require.Equal(t, primitives.Slot(1), minSlot)
}
func testReadN(ctx context.Context, t *testing.T, c chan batch, n int, into []batch) []batch {
func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []batch) []batch {
for i := 0; i < n; i++ {
select {
case b := <-c:
@@ -107,28 +109,3 @@ func testReadN(ctx context.Context, t *testing.T, c chan batch, n int, into []ba
}
return into
}
func TestBackfillMinSlotDefault(t *testing.T) {
oe := helpers.MinEpochsForBlockRequests()
current := primitives.Slot((oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
s := &Service{}
specMin := minimumBackfillSlot(current)
t.Run("equal to specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin)
require.NoError(t, opt(s))
require.Equal(t, specMin, s.ms(current))
})
t.Run("older than specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin - 1)
require.NoError(t, opt(s))
// if WithMinimumSlot is older than the spec minimum, we should use it.
require.Equal(t, specMin-1, s.ms(current))
})
t.Run("newer than specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin + 1)
require.NoError(t, opt(s))
// if WithMinimumSlot is newer than the spec minimum, we should use the spec minimum
require.Equal(t, specMin, s.ms(current))
})
}

View File

@@ -45,7 +45,7 @@ func (w *p2pWorker) run(ctx context.Context) {
func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
cs := w.c.CurrentSlot()
blobRetentionStart, err := sync.BlobRPCMinValidSlot(cs)
blobRetentionStart, err := sync.BlobsByRangeMinStartSlot(cs)
if err != nil {
return b.withRetryableError(errors.Wrap(err, "configuration issue, could not compute minimum blob retention slot"))
}

View File

@@ -327,3 +327,40 @@ func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
require.Equal(t, true, found != nil)
}
}
func TestRoundTripDenebSave(t *testing.T) {
ctx := context.Background()
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
parentRoot := [32]byte{}
c := blobsTestCase{}
chain, clock := defaultMockChain(t)
c.chain = chain
c.clock = clock
oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.clock.CurrentSlot())))
require.NoError(t, err)
maxBlobs := fieldparams.MaxBlobsPerBlock
block, bsc := generateTestBlockWithSidecars(t, parentRoot, oldest, maxBlobs)
require.Equal(t, len(block.Block.Body.BlobKzgCommitments), len(bsc))
require.Equal(t, maxBlobs, len(bsc))
for i := range bsc {
require.DeepEqual(t, block.Block.Body.BlobKzgCommitments[i], bsc[i].KzgCommitment)
}
d := db.SetupDB(t)
util.SaveBlock(t, ctx, d, block)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
dbBlock, err := d.Block(ctx, root)
require.NoError(t, err)
comms, err := dbBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, maxBlobs, len(comms))
for i := range bsc {
require.DeepEqual(t, comms[i], bsc[i].KzgCommitment)
}
}

View File

@@ -478,7 +478,7 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
return bwb, nil
}
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
blobWindowStart, err := prysmsync.BlobsByRangeMinStartSlot(f.clock.CurrentSlot())
if err != nil {
return nil, err
}

View File

@@ -144,12 +144,6 @@ var (
Help: "Time for gossiped blob sidecars to arrive",
},
)
blobSidecarVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_blob_sidecar_verification_milliseconds",
Help: "Time to verify gossiped blob sidecars",
},
)
// Sync committee verification performance.
syncMessagesForUnknownBlocks = promauto.NewCounter(

View File

@@ -123,10 +123,10 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
return nil
}
// BlobRPCMinValidSlot returns the lowest slot that we should expect peers to respect as the
// BlobsByRangeMinStartSlot returns the lowest slot that we should expect peers to respect as the
// start slot in a BlobSidecarsByRange request. This can be used to validate incoming requests and
// to avoid pestering peers with requests for blobs that are outside the retention window.
func BlobRPCMinValidSlot(current primitives.Slot) (primitives.Slot, error) {
func BlobsByRangeMinStartSlot(current primitives.Slot) (primitives.Slot, error) {
// Avoid overflow if we're running on a config where deneb is set to far future epoch.
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 {
return primitives.Slot(math.MaxUint64), nil
@@ -176,9 +176,9 @@ func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.S
// [max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]
// where current_epoch is defined by the current wall-clock time,
// and clients MUST support serving requests of blobs on this range.
minStartSlot, err := BlobRPCMinValidSlot(current)
minStartSlot, err := BlobsByRangeMinStartSlot(current)
if err != nil {
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "BlobRPCMinValidSlot error")
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "BlobsByRangeMinStartSlot error")
}
if rp.start > maxStart {
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "start > maxStart")

View File

@@ -178,7 +178,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
and clients MUST support serving requests of blobs on this range.
*/
defaultCurrent := denebSlot + 100 + minReqSlots
defaultMinStart, err := BlobRPCMinValidSlot(defaultCurrent)
defaultMinStart, err := BlobsByRangeMinStartSlot(defaultCurrent)
require.NoError(t, err)
cases := []struct {
name string
@@ -285,67 +285,3 @@ func TestBlobsByRangeValidation(t *testing.T) {
})
}
}
func TestBlobRPCMinValidSlot(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
current func(t *testing.T) types.Slot
expected types.Slot
err error
}{
{
name: "before deneb",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch - 1)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
},
{
name: "equal to deneb",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
},
{
name: "after deneb, before expiry starts",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
},
{
name: "expiry starts one epoch after deneb + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot + params.BeaconConfig().SlotsPerEpoch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
current := c.current(t)
got, err := BlobRPCMinValidSlot(current)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.expected, got)
})
}
}
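
The removed test exercised the clamping rule in BlobRPCMinValidSlot: the minimum valid start slot is the later of the Deneb start slot and the beginning of the retention window. A hedged arithmetic sketch of that rule (minStartSlot and the numbers are illustrative, not the real implementation):

package main

import "fmt"

// minStartSlot returns the oldest slot a peer must serve in a
// blob_sidecars_by_range request: the Deneb start slot, or the start of the
// retention window, whichever is later.
func minStartSlot(current, denebStart, retentionSlots uint64) uint64 {
	if current <= retentionSlots {
		return denebStart // retention window reaches back past genesis
	}
	windowStart := current - retentionSlots
	if windowStart < denebStart {
		return denebStart
	}
	return windowStart
}

func main() {
	fmt.Println(minStartSlot(5000, 3200, 4096)) // 3200: window start predates Deneb
	fmt.Println(minStartSlot(9000, 3200, 4096)) // 4904: retention window governs
}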

View File

@@ -13,12 +13,30 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func blobMinReqEpoch(finalized, current primitives.Epoch) primitives.Epoch {
// max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
denebFork := params.BeaconConfig().DenebForkEpoch
var reqWindow primitives.Epoch
if current > params.BeaconConfig().MinEpochsForBlobsSidecarsRequest {
reqWindow = current - params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
}
if finalized >= reqWindow && finalized > denebFork {
return finalized
}
if reqWindow >= finalized && reqWindow > denebFork {
return reqWindow
}
return denebFork
}
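
blobMinReqEpoch is a three-way max with an underflow guard on the subtraction. A small sketch that mirrors the rule and checks it against one of the cases from the test table below (window stands in for MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; the helper is illustrative):

package main

import "fmt"

// minReqEpoch mirrors blobMinReqEpoch: max(finalized, current - window, deneb),
// with the subtraction floored at zero to avoid underflow.
func minReqEpoch(finalized, current, window, deneb uint64) uint64 {
	var reqWindow uint64
	if current > window {
		reqWindow = current - window
	}
	m := deneb
	if finalized > m {
		m = finalized
	}
	if reqWindow > m {
		m = reqWindow
	}
	return m
}

func main() {
	// Matches the "max is finalized" case in the test table below:
	// deneb=100, current=99+window, finalized=101 => 101.
	fmt.Println(minReqEpoch(101, 99+4096, 4096, 100)) // 101
}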
// blobSidecarByRootRPCHandler handles the /eth2/beacon_chain/req/blob_sidecars_by_root/1/ RPC request.
// spec: https://github.com/ethereum/consensus-specs/blob/a7e45db9ac2b60a33e144444969ad3ac0aae3d4c/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
@@ -47,13 +65,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
if len(blobIdents) > batchSize {
ticker = time.NewTicker(time.Second)
}
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
cs := s.cfg.clock.CurrentSlot()
minReqSlot, err := BlobRPCMinValidSlot(cs)
if err != nil {
return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
}
minReqEpoch := blobMinReqEpoch(s.cfg.chain.FinalizedCheckpt().Epoch, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
for i := range blobIdents {
if err := ctx.Err(); err != nil {
@@ -83,15 +95,12 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
// If any root in the request content references a block earlier than minimum_request_epoch,
// peers MAY respond with error code 3: ResourceUnavailable or not include the blob in the response.
// note: we are deviating from the spec to allow requests for blobs that are before minimum_request_epoch,
// up to the beginning of the retention period.
if sc.Slot() < minReqSlot {
if slots.ToEpoch(sc.Slot()) < minReqEpoch {
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
log.WithError(types.ErrBlobLTMinRequest).
Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
return types.ErrBlobLTMinRequest
}
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")

View File

@@ -19,7 +19,7 @@ import (
)
func (c *blobsTestCase) defaultOldestSlotByRoot(t *testing.T) types.Slot {
oldest, err := BlobRPCMinValidSlot(c.clock.CurrentSlot())
oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.clock.CurrentSlot())))
require.NoError(t, err)
return oldest
}
@@ -259,3 +259,71 @@ func TestBlobsByRootOK(t *testing.T) {
})
}
}
func TestBlobsByRootMinReqEpoch(t *testing.T) {
winMin := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
cases := []struct {
name string
finalized types.Epoch
current types.Epoch
deneb types.Epoch
expected types.Epoch
}{
{
name: "testnet genesis",
deneb: 100,
current: 0,
finalized: 0,
expected: 100,
},
{
name: "underflow averted",
deneb: 100,
current: winMin - 1,
finalized: 0,
expected: 100,
},
{
name: "underflow averted - finalized is higher",
deneb: 100,
current: winMin - 1,
finalized: winMin - 2,
expected: winMin - 2,
},
{
name: "underflow averted - genesis at deneb",
deneb: 0,
current: winMin - 1,
finalized: 0,
expected: 0,
},
{
name: "max is finalized",
deneb: 100,
current: 99 + winMin,
finalized: 101,
expected: 101,
},
{
name: "reqWindow > finalized, reqWindow < deneb",
deneb: 100,
current: 99 + winMin,
finalized: 98,
expected: 100,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
cfg.DenebForkEpoch = c.deneb
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
ep := blobMinReqEpoch(c.finalized, c.current)
require.Equal(t, c.expected, ep)
})
}
}

View File

@@ -53,7 +53,7 @@ const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 16384
const seenAggregatedAttSize = 1024
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
const seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
const seenExitSize = 100
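
These constants size the gossip deduplication caches: a bounded LRU keyed by message digest keeps memory flat no matter how much gossip arrives. A minimal sketch of the idea with hashicorp/golang-lru, which the repository already depends on (the key shape is illustrative):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

const (
	seenBlockSize = 1000
	seenBlobSize  = seenBlockSize * 4 // one entry per (block, blob index) pair
)

func main() {
	// Fixed-capacity cache: old digests are evicted, so memory stays bounded
	// at roughly capacity * key size regardless of gossip volume.
	seen, err := lru.New(seenBlobSize)
	if err != nil {
		panic(err)
	}
	key := [32]byte{1} // e.g. hash of (block root, blob index)
	if !seen.Contains(key) {
		seen.Add(key, true)
	}
	fmt.Println("seen:", seen.Contains(key), "len:", seen.Len())
}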

View File

@@ -321,7 +321,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
}
}
// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// subscribe to a static subnet with the given topic and index.A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte, subnetCount uint64) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()

View File

@@ -2,16 +2,10 @@ package sync
import (
"context"
"fmt"
"os"
"path"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/io/file"
"google.golang.org/protobuf/proto"
)
@@ -39,10 +33,7 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
if r != [32]byte{} {
s.setBadBlock(ctx, r) // Setting head block as bad.
} else {
// TODO(13721): Remove this once we can deprecate the flag.
interop.WriteBlockToDisk(signed, true /*failed*/)
saveInvalidBlockToTemp(signed)
s.setBadBlock(ctx, root)
}
}
@@ -54,21 +45,3 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
// saveInvalidBlockToTemp writes the invalid block to the temp directory as an SSZ-encoded file.
func saveInvalidBlockToTemp(block interfaces.ReadOnlySignedBeaconBlock) {
if !features.Get().SaveInvalidBlock {
return
}
filename := fmt.Sprintf("beacon_block_%d.ssz", block.Block().Slot())
fp := path.Join(os.TempDir(), filename)
log.Warnf("Writing invalid block to disk at %s", fp)
enc, err := block.MarshalSSZ()
if err != nil {
log.WithError(err).Error("Failed to ssz encode block")
return
}
if err := file.WriteFile(fp, enc); err != nil {
log.WithError(err).Error("Failed to write to disk")
}
}
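
The removed helper is a feature-flag-gated debug dump: best effort, never fatal, written to the OS temp directory. A stdlib sketch of the same shape (the SAVE_INVALID environment variable stands in for the feature flag):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

var saveInvalid = os.Getenv("SAVE_INVALID") != "" // stand-in for the feature flag

// dumpInvalid writes an encoded artifact to the OS temp directory for
// offline inspection; failures are logged, never fatal, since this is a
// best-effort debugging aid.
func dumpInvalid(name string, enc []byte) {
	if !saveInvalid {
		return
	}
	fp := filepath.Join(os.TempDir(), name)
	if err := os.WriteFile(fp, enc, 0o600); err != nil {
		fmt.Fprintln(os.Stderr, "failed to write invalid artifact:", err)
		return
	}
	fmt.Println("wrote invalid artifact to", fp)
}

func main() {
	dumpInvalid("beacon_block_123.ssz", []byte{0xde, 0xad})
}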

View File

@@ -3,21 +3,17 @@ package sync
import (
"context"
"fmt"
"os"
"path"
"strings"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/io/file"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -113,7 +109,6 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
}
if err := vf.SidecarKzgProofVerified(); err != nil {
saveInvalidBlobToTemp(blob)
return pubsub.ValidationReject, err
}
@@ -123,12 +118,10 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
fields := blobFields(blob)
sinceSlotStartTime := receivedTime.Sub(startTime)
validationTime := s.cfg.clock.Now().Sub(receivedTime)
fields["sinceSlotStartTime"] = sinceSlotStartTime
fields["validationTime"] = validationTime
fields["validationTime"] = s.cfg.clock.Now().Sub(receivedTime)
log.WithFields(fields).Debug("Received blob sidecar gossip")
blobSidecarVerificationGossipSummary.Observe(float64(validationTime.Milliseconds()))
blobSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
vBlobData, err := vf.VerifiedROBlob()
@@ -172,21 +165,3 @@ func blobFields(b blocks.ROBlob) logrus.Fields {
func computeSubnetForBlobSidecar(index uint64) uint64 {
return index % params.BeaconConfig().BlobsidecarSubnetCount
}
// saveInvalidBlobToTemp writes the invalid blob sidecar to the temp directory as an SSZ-encoded file.
func saveInvalidBlobToTemp(b blocks.ROBlob) {
if !features.Get().SaveInvalidBlob {
return
}
filename := fmt.Sprintf("blob_sidecar_%#x_%d_%d.ssz", b.BlockRoot(), b.Slot(), b.Index)
fp := path.Join(os.TempDir(), filename)
log.Warnf("Writing invalid blob sidecar to disk at %s", fp)
enc, err := b.MarshalSSZ()
if err != nil {
log.WithError(err).Error("Failed to ssz encode blob sidecar")
return
}
if err := file.WriteFile(fp, enc); err != nil {
log.WithError(err).Error("Failed to write to disk")
}
}

View File

@@ -9,7 +9,6 @@ go_library(
"fake.go",
"initializer.go",
"interface.go",
"metrics.go",
"mock.go",
"result.go",
],
@@ -36,8 +35,6 @@ go_library(
"//time/slots:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -190,15 +190,12 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
// First check if there is a cached verification that can be reused.
seen, err := bv.sc.SignatureVerified(sd)
if seen {
blobVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
if err != nil {
log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache")
blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
return ErrInvalidProposerSignature
}
return nil
}
blobVerificationProposerSignatureCache.WithLabelValues("miss").Inc()
// Retrieve the parent state to fallback to full verification.
parent, err := bv.parentState(ctx)

View File

@@ -1,16 +0,0 @@
package verification
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
blobVerificationProposerSignatureCache = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "blob_verification_proposer_signature_cache",
Help: "BlobSidecar proposer signature cache result.",
},
[]string{"result"},
)
)
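
The deleted metrics.go defined a labeled counter so cache hits, invalid hits, and misses each get their own time series. A minimal promauto sketch of the same pattern (the metric name and lookup helper are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var sigCacheResults = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "demo_signature_cache_results",
		Help: "Signature cache lookups by result.",
	},
	[]string{"result"}, // one time series per label value
)

func lookup(cached, valid bool) {
	switch {
	case cached && valid:
		sigCacheResults.WithLabelValues("hit-valid").Inc()
	case cached:
		sigCacheResults.WithLabelValues("hit-invalid").Inc()
	default:
		sigCacheResults.WithLabelValues("miss").Inc()
	}
}

func main() {
	lookup(true, true)
	lookup(false, false)
	fmt.Println("recorded two cache lookups")
}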

View File

@@ -38,7 +38,6 @@ go_library(
"@com_github_ethereum_go_ethereum//log:go_default_library",
"@com_github_ipfs_go_log_v2//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

View File

@@ -11,7 +11,6 @@ import (
gethlog "github.com/ethereum/go-ethereum/log"
golog "github.com/ipfs/go-log/v2"
joonix "github.com/joonix/log"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/node"
"github.com/prysmaticlabs/prysm/v5/cmd"
@@ -144,96 +143,84 @@ var appFlags = []cli.Flag{
bflags.EnableExperimentalBackfill,
bflags.BackfillBatchSize,
bflags.BackfillWorkerCount,
bflags.BackfillOldestSlot,
}
func init() {
appFlags = cmd.WrapFlags(append(appFlags, features.BeaconChainFlags...))
}
func before(ctx *cli.Context) error {
// Load flags from config file, if specified.
if err := cmd.LoadFlagsFromConfig(ctx, appFlags); err != nil {
return errors.Wrap(err, "failed to load flags from config file")
}
format := ctx.String(cmd.LogFormat.Name)
switch format {
case "text":
formatter := new(prefixed.TextFormatter)
formatter.TimestampFormat = "2006-01-02 15:04:05"
formatter.FullTimestamp = true
// If persistent log files are written, disable log message coloring because
// ANSI color codes render as gibberish in log files.
formatter.DisableColors = ctx.String(cmd.LogFileName.Name) != ""
logrus.SetFormatter(formatter)
case "fluentd":
f := joonix.NewFormatter()
if err := joonix.DisableTimestampFormat(f); err != nil {
panic(err)
}
logrus.SetFormatter(f)
case "json":
logrus.SetFormatter(&logrus.JSONFormatter{})
case "journald":
if err := journald.Enable(); err != nil {
return err
}
default:
return fmt.Errorf("unknown log format %s", format)
}
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
return errors.Wrap(err, "failed to expand single endpoint")
}
if ctx.IsSet(flags.SetGCPercent.Name) {
runtimeDebug.SetGCPercent(ctx.Int(flags.SetGCPercent.Name))
}
if err := debug.Setup(ctx); err != nil {
return errors.Wrap(err, "failed to setup debug")
}
if err := fdlimits.SetMaxFdLimits(); err != nil {
return errors.Wrap(err, "failed to set max fd limits")
}
return cmd.ValidateNoArgs(ctx)
}
func main() {
// rctx = root context with cancellation.
// note other instances of ctx in this func are *cli.Context.
rctx, cancel := context.WithCancel(context.Background())
app := cli.App{
Name: "beacon-chain",
Usage: "this is a beacon chain implementation for Ethereum",
Action: func(ctx *cli.Context) error {
if err := startNode(ctx, cancel); err != nil {
log.Fatal(err.Error())
app := cli.App{}
app.Name = "beacon-chain"
app.Usage = "this is a beacon chain implementation for Ethereum"
app.Action = func(ctx *cli.Context) error {
if err := startNode(ctx, cancel); err != nil {
return cli.Exit(err.Error(), 1)
}
return nil
}
app.Version = version.Version()
app.Commands = []*cli.Command{
dbcommands.Commands,
jwtcommands.Commands,
}
app.Flags = appFlags
app.Before = func(ctx *cli.Context) error {
// Load flags from config file, if specified.
if err := cmd.LoadFlagsFromConfig(ctx, app.Flags); err != nil {
return err
}
format := ctx.String(cmd.LogFormat.Name)
switch format {
case "text":
formatter := new(prefixed.TextFormatter)
formatter.TimestampFormat = "2006-01-02 15:04:05"
formatter.FullTimestamp = true
// If persistent log files are written, disable log message coloring because
// ANSI color codes render as gibberish in log files.
formatter.DisableColors = ctx.String(cmd.LogFileName.Name) != ""
logrus.SetFormatter(formatter)
case "fluentd":
f := joonix.NewFormatter()
if err := joonix.DisableTimestampFormat(f); err != nil {
panic(err)
}
logrus.SetFormatter(f)
case "json":
logrus.SetFormatter(&logrus.JSONFormatter{})
case "journald":
if err := journald.Enable(); err != nil {
return err
}
return nil
},
Version: version.Version(),
Commands: []*cli.Command{
dbcommands.Commands,
jwtcommands.Commands,
},
Flags: appFlags,
Before: before,
default:
return fmt.Errorf("unknown log format %s", format)
}
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
return err
}
if ctx.IsSet(flags.SetGCPercent.Name) {
runtimeDebug.SetGCPercent(ctx.Int(flags.SetGCPercent.Name))
}
if err := debug.Setup(ctx); err != nil {
return err
}
if err := fdlimits.SetMaxFdLimits(); err != nil {
return err
}
return cmd.ValidateNoArgs(ctx)
}
defer func() {

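The hunk above swaps a struct-literal cli.App for field-by-field assignment plus a named before hook; the two styles are equivalent in urfave/cli v2. A minimal sketch of the struct-literal form, reduced to the fields this diff touches:
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:  "beacon-chain",
		Usage: "this is a beacon chain implementation for Ethereum",
		Before: func(c *cli.Context) error {
			// Flag and config validation runs exactly once, before Action.
			return nil
		},
		Action: func(c *cli.Context) error {
			fmt.Println("node starting")
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}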
View File

@@ -9,7 +9,6 @@ go_library(
"//beacon-chain/node:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -22,7 +22,7 @@ var (
Usage: "Number of blocks per backfill batch. " +
"A larger number will request more blocks at once from peers, but also consume more system memory to " +
"hold batches in memory during processing. This has a multiplicative effect with " + backfillWorkerCountName + ".",
Value: 32,
Value: 64,
}
// BackfillWorkerCount allows users to tune the number of concurrent backfill batches to download, to maximize
// network utilization at the cost of higher memory.
@@ -35,9 +35,4 @@ var (
"This has a multiplicative effect with " + backfillBatchSizeName + ".",
Value: 2,
}
BackfillOldestSlot = &cli.Uint64Flag{
Name: "backfill-oldest-slot",
Usage: "Specifies the oldest slot that backfill should download. " +
"If this value is greater than current_slot - MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored with a warning log.",
}
)
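The multiplicative effect mentioned in both usage strings is simple arithmetic: the number of blocks held in memory at once is roughly batch size times worker count. A sketch using the two values shown in the hunk; the per-block size is an illustrative assumption, not a measured figure:
package main

import "fmt"

func main() {
	const (
		batchSize   = 64 // blocks per batch, as in the flag above
		workerCount = 2  // concurrent batches, as in the flag above
		// Assumption: an illustrative average block size, not a measured value.
		avgBlockBytes = 150_000
	)
	inFlight := batchSize * workerCount
	fmt.Printf("blocks in flight: %d (~%d MB)\n", inFlight, inFlight*avgBlockBytes/1_000_000)
}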

View File

@@ -4,7 +4,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/node"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/sync/backfill/flags"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/urfave/cli/v2"
)
@@ -12,17 +11,11 @@ import (
// from flag parsing.
func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
opt := func(node *node.BeaconNode) (err error) {
bno := []backfill.ServiceOption{
node.BackfillOpts = []backfill.ServiceOption{
backfill.WithBatchSize(c.Uint64(flags.BackfillBatchSize.Name)),
backfill.WithWorkerCount(c.Int(flags.BackfillWorkerCount.Name)),
backfill.WithEnableBackfill(c.Bool(flags.EnableExperimentalBackfill.Name)),
}
// The zero value of this uint flag would be genesis, so we use IsSet to differentiate nil from zero case.
if c.IsSet(flags.BackfillOldestSlot.Name) {
uv := c.Uint64(flags.BackfillOldestSlot.Name)
bno = append(bno, backfill.WithMinimumSlot(primitives.Slot(uv)))
}
node.BackfillOpts = bno
return nil
}
return []node.Option{opt}, nil

View File

@@ -140,7 +140,6 @@ var appHelpFlagGroups = []flagGroup{
backfill.EnableExperimentalBackfill,
backfill.BackfillWorkerCount,
backfill.BackfillBatchSize,
backfill.BackfillOldestSlot,
},
},
{

View File

@@ -72,10 +72,6 @@ type Flags struct {
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
// BlobSaveFsync requires blob saving to block on fsync to ensure blobs are durably persisted before passing DA.
BlobSaveFsync bool
SaveInvalidBlock bool // SaveInvalidBlock saves invalid blocks to the temp directory.
SaveInvalidBlob bool // SaveInvalidBlob saves invalid blobs to the temp directory.
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
// changed on disk. This feature is for advanced use cases only.
KeystoreImportDebounceInterval time.Duration
@@ -191,16 +187,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.WriteSSZStateTransitions = true
}
if ctx.Bool(saveInvalidBlockTempFlag.Name) {
logEnabled(saveInvalidBlockTempFlag)
cfg.SaveInvalidBlock = true
}
if ctx.Bool(saveInvalidBlobTempFlag.Name) {
logEnabled(saveInvalidBlobTempFlag)
cfg.SaveInvalidBlob = true
}
if ctx.IsSet(disableGRPCConnectionLogging.Name) {
logDisabled(disableGRPCConnectionLogging)
cfg.DisableGRPCConnectionLogs = true

View File

@@ -40,15 +40,7 @@ var (
}
writeSSZStateTransitionsFlag = &cli.BoolFlag{
Name: "interop-write-ssz-state-transitions",
Usage: "Writes SSZ states to disk after attempted state transitio.",
}
saveInvalidBlockTempFlag = &cli.BoolFlag{
Name: "save-invalid-block-temp",
Usage: "Writes invalid blocks to temp directory.",
}
saveInvalidBlobTempFlag = &cli.BoolFlag{
Name: "save-invalid-blob-temp",
Usage: "Writes invalid blobs to temp directory.",
Usage: "Writes failed SSZ block to disk after attempted importing block and state transition.",
}
disableGRPCConnectionLogging = &cli.BoolFlag{
Name: "disable-grpc-connection-logging",
@@ -204,8 +196,6 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
devModeFlag,
enableExperimentalState,
writeSSZStateTransitionsFlag,
saveInvalidBlockTempFlag,
saveInvalidBlobTempFlag,
disableGRPCConnectionLogging,
HoleskyTestnet,
PraterTestnet,

View File

@@ -49,82 +49,6 @@ func TestProposerSettingsLoader(t *testing.T) {
validatorRegistrationEnabled bool
skipDBSavedCheck bool
}{
{
name: "graffiti in db without fee recipient",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "",
url: "",
defaultfee: "",
},
},
want: func() *proposer.Settings {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
return &proposer.Settings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
bytesutil.ToBytes48(key1): {
GraffitiConfig: &proposer.GraffitiConfig{
Graffiti: "specific graffiti",
},
},
},
}
},
withdb: func(db iface.ValidatorDB) error {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
settings := &proposer.Settings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
bytesutil.ToBytes48(key1): {
GraffitiConfig: &proposer.GraffitiConfig{
Graffiti: "specific graffiti",
},
},
},
}
return db.SaveProposerSettings(context.Background(), settings)
},
},
{
name: "graffiti from file",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "./testdata/good-graffiti-settings.json",
url: "",
defaultfee: "",
},
},
want: func() *proposer.Settings {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
return &proposer.Settings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
GraffitiConfig: &proposer.GraffitiConfig{
Graffiti: "some graffiti",
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(30000000),
},
},
},
DefaultConfig: &proposer.Option{
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
},
},
}
},
},
{
name: "db settings override file settings if file default config is missing",
args: args{
@@ -951,8 +875,6 @@ func TestProposerSettingsLoader(t *testing.T) {
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
} else {
require.NoError(t, err)
}
if tt.wantLog != "" {
assert.LogsContain(t, hook,

View File

@@ -1,19 +0,0 @@
{
"proposer_config": {
"0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a": {
"fee_recipient": "0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3",
"graffiti": "some graffiti",
"builder": {
"enabled": true,
"gas_limit": "30000000"
}
}
},
"default_config": {
"fee_recipient": "0x6e35733c5af9B61374A128e6F85f553aF09ff89A",
"builder": {
"enabled": true,
"gas_limit": 40000000
}
}
}

View File

@@ -19,6 +19,9 @@ func SettingFromConsensus(ps *validatorpb.ProposerSettingsPayload) (*Settings, e
if ps.ProposerConfig != nil && len(ps.ProposerConfig) != 0 {
settings.ProposeConfig = make(map[[fieldparams.BLSPubkeyLength]byte]*Option)
for key, optionPayload := range ps.ProposerConfig {
if optionPayload.FeeRecipient == "" {
continue
}
decodedKey, err := hexutil.Decode(key)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("cannot decode public key %s", key))
@@ -26,15 +29,13 @@ func SettingFromConsensus(ps *validatorpb.ProposerSettingsPayload) (*Settings, e
if len(decodedKey) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("%v is not a bls public key", key)
}
p := &Option{}
if optionPayload.Graffiti != nil {
p.GraffitiConfig = &GraffitiConfig{*optionPayload.Graffiti}
if err := verifyOption(key, optionPayload); err != nil {
return nil, err
}
if optionPayload.FeeRecipient != "" {
if err := verifyOption(key, optionPayload); err != nil {
return nil, err
}
p.FeeRecipientConfig = &FeeRecipientConfig{FeeRecipient: common.HexToAddress(optionPayload.FeeRecipient)}
p := &Option{
FeeRecipientConfig: &FeeRecipientConfig{
FeeRecipient: common.HexToAddress(optionPayload.FeeRecipient),
},
}
if optionPayload.Builder != nil {
p.BuilderConfig = BuilderConfigFromConsensus(optionPayload.Builder)
@@ -140,16 +141,10 @@ type FeeRecipientConfig struct {
FeeRecipient common.Address
}
// GraffitiConfig is a prysm internal representation to see if the graffiti was set.
type GraffitiConfig struct {
Graffiti string
}
// Option is a Prysm internal representation of the ProposerOptionPayload on the validator client in bytes format instead of hex.
type Option struct {
FeeRecipientConfig *FeeRecipientConfig
BuilderConfig *BuilderConfig
GraffitiConfig *GraffitiConfig
}
// Clone creates a deep copy of proposer option
@@ -164,9 +159,6 @@ func (po *Option) Clone() *Option {
if po.BuilderConfig != nil {
p.BuilderConfig = po.BuilderConfig.Clone()
}
if po.GraffitiConfig != nil {
p.GraffitiConfig = po.GraffitiConfig.Clone()
}
return p
}
@@ -181,9 +173,6 @@ func (po *Option) ToConsensus() *validatorpb.ProposerOptionPayload {
if po.BuilderConfig != nil {
p.Builder = po.BuilderConfig.ToConsensus()
}
if po.GraffitiConfig != nil {
p.Graffiti = &po.GraffitiConfig.Graffiti
}
return p
}
@@ -233,14 +222,6 @@ func (bc *BuilderConfig) Clone() *BuilderConfig {
return c
}
// Clone creates a deep copy of graffiti config
func (gc *GraffitiConfig) Clone() *GraffitiConfig {
if gc == nil {
return nil
}
return &GraffitiConfig{gc.Graffiti}
}
// ToConsensus converts Builder Config to the protobuf object
func (bc *BuilderConfig) ToConsensus() *validatorpb.BuilderConfig {
if bc == nil {

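The Clone methods in this file all follow the same nil-receiver-safe deep-copy pattern, so callers can clone an optional sub-config without a nil check of their own. A reduced sketch of the pattern using the struct removed in the hunk above:
package main

import "fmt"

// GraffitiConfig mirrors the struct removed in the hunk above.
type GraffitiConfig struct {
	Graffiti string
}

// Clone returns a deep copy and is safe to call on a nil receiver.
func (gc *GraffitiConfig) Clone() *GraffitiConfig {
	if gc == nil {
		return nil
	}
	return &GraffitiConfig{gc.Graffiti}
}

func main() {
	var missing *GraffitiConfig
	fmt.Println(missing.Clone() == nil) // true: no panic on a nil receiver
}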
Some files were not shown because too many files have changed in this diff.