Compare commits


1 Commit

Author        SHA1        Message                                         Date
terence tsao  3120746e59  ssz interop write: only capture invalid block   2024-03-11 08:22:57 -10:00
353 changed files with 11176 additions and 10369 deletions

View File

@@ -6,12 +6,6 @@ import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# hermetic_cc_toolchain v3.0.1 required changes.
common --enable_platform_specific_config
build:linux --sandbox_add_mount_pair=/tmp
build:macos --sandbox_add_mount_pair=/var/tmp
build:windows --sandbox_add_mount_pair=C:\Temp
# E2E run with debug gotag
test:e2e --define gotags=debug

View File

@@ -1 +1 @@
7.1.0
7.0.0

.gitignore vendored (3 lines changed)
View File

@@ -41,6 +41,3 @@ jwt.hex
# manual testing
tmp
# spectest coverage reports
report.txt

MODULE.bazel.lock generated (821 lines changed)

File diff suppressed because it is too large.

View File

@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

View File

@@ -16,14 +16,12 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
)
@@ -83,10 +81,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
],
)
@@ -130,9 +128,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -153,13 +151,17 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
],
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
@@ -198,14 +200,10 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.8",
go_version = "1.21.6",
nogo = "@//:nogo",
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -243,7 +241,9 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0"
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
bls_test_version = "v0.1.1"
@@ -260,7 +260,7 @@ filegroup(
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -276,7 +276,7 @@ filegroup(
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -292,7 +292,7 @@ filegroup(
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -306,7 +306,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -342,6 +342,22 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -6,14 +6,12 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"health.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/iface:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -39,12 +37,10 @@ go_test(
srcs = [
"checkpoint_test.go",
"client_test.go",
"health_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -58,6 +54,5 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -1,55 +0,0 @@
package beacon
import (
"context"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node iface.HealthNode
sync.RWMutex
}
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy() bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.RLock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
if isStatusChanged {
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
n.isHealthy = &newStatus
n.Unlock() // It's better to unlock as soon as the protected section is over.
n.healthChan <- newStatus
}
return newStatus
}
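
The deleted tracker above caches the latest status under a RWMutex and publishes only transitions on a buffered channel of capacity one. A minimal consumption sketch, assuming it sits in the same package as the tracker; the ticker interval, the watchHealth name, and the package-level logrus logger are illustrative assumptions, not part of the diff:

```go
// watchHealth polls CheckHealth on an interval and reacts only to
// transitions, which is what the capacity-one healthChan is for.
func watchHealth(ctx context.Context, node iface.HealthNode) {
	tracker := NewNodeHealthTracker(node)
	ticker := time.NewTicker(10 * time.Second) // arbitrary poll interval
	defer ticker.Stop()
	for {
		select {
		case healthy := <-tracker.HealthUpdates():
			log.WithField("healthy", healthy).Info("Node health changed")
		case <-ticker.C:
			tracker.CheckHealth(ctx) // refresh; publishes only on change
		case <-ctx.Done():
			return
		}
	}
}
```

Note that because CheckHealth sends on every transition and the channel holds only one value, a consumer must keep draining HealthUpdates or a second transition will block the polling goroutine.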

View File

@@ -1,118 +0,0 @@
package beacon
import (
"context"
"sync"
"testing"
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(context.Background())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
n := NewNodeHealthTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
n.CheckHealth(context.Background())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy() // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}

View File

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["health.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
visibility = ["//visibility:public"],
)

View File

@@ -1,13 +0,0 @@
package iface
import "context"
type HealthTracker interface {
HealthUpdates() <-chan bool
IsHealthy() bool
CheckHealth(ctx context.Context) bool
}
type HealthNode interface {
IsHealthy(ctx context.Context) bool
}

View File

@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon/iface:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -1,53 +0,0 @@
package testing
import (
"context"
"reflect"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
)
var (
_ = iface.HealthNode(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}

View File

@@ -304,8 +304,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -343,8 +341,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -383,8 +379,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
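
The versionOpt closures in the hunks above follow a request-option pattern: c.do accepts functions that mutate the outgoing *http.Request (here, the Eth-Consensus-Version and content-negotiation headers) before dispatch. A self-contained sketch of the pattern under assumed names (reqOption, withHeader, doPost; the real c.do signature is Prysm-internal):

```go
package apiclient

import (
	"context"
	"io"
	"net/http"
)

// reqOption mutates a request before it is sent.
type reqOption func(*http.Request)

// withHeader returns an option that sets a single header.
func withHeader(key, val string) reqOption {
	return func(r *http.Request) { r.Header.Set(key, val) }
}

// doPost builds the request, applies each option in order, then sends it.
func doPost(ctx context.Context, c *http.Client, url string, body io.Reader, opts ...reqOption) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	for _, opt := range opts {
		opt(req)
	}
	return c.Do(req)
}
```

Each call site passes exactly the headers it needs, so the transport function never grows a parameter per header.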

View File

@@ -321,8 +321,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -349,8 +347,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -380,8 +376,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)

View File

@@ -21,9 +21,6 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// ErrConnectionIssue represents a connection problem.
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)

View File

@@ -1,24 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["event_stream.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["event_stream_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,148 +0,0 @@
package event
import (
"bufio"
"context"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
log "github.com/sirupsen/logrus"
)
const (
EventHead = "head"
EventBlock = "block"
EventAttestation = "attestation"
EventVoluntaryExit = "voluntary_exit"
EventBlsToExecutionChange = "bls_to_execution_change"
EventProposerSlashing = "proposer_slashing"
EventAttesterSlashing = "attester_slashing"
EventFinalizedCheckpoint = "finalized_checkpoint"
EventChainReorg = "chain_reorg"
EventContributionAndProof = "contribution_and_proof"
EventLightClientFinalityUpdate = "light_client_finality_update"
EventLightClientOptimisticUpdate = "light_client_optimistic_update"
EventPayloadAttributes = "payload_attributes"
EventBlobSidecar = "blob_sidecar"
EventError = "error"
EventConnectionError = "connection_error"
)
var (
_ = EventStreamClient(&EventStream{})
)
var DefaultEventTopics = []string{EventHead}
type EventStreamClient interface {
Subscribe(eventsChannel chan<- *Event)
}
type Event struct {
EventType string
Data []byte
}
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
ctx context.Context
httpClient *http.Client
host string
topics []string
}
func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
// Check if the host is a valid URL
_, err := url.ParseRequestURI(host)
if err != nil {
return nil, err
}
if len(topics) == 0 {
return nil, errors.New("no topics provided")
}
return &EventStream{
ctx: ctx,
httpClient: httpClient,
host: host,
topics: topics,
}, nil
}
func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
allTopics := strings.Join(h.topics, ",")
log.WithField("topics", allTopics).Info("Listening to Beacon API events")
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
}
defer func() {
if closeErr := resp.Body.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close events response body")
}
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
var eventType, data string // Variables to store event type and data
// Iterate over lines of the event stream
for scanner.Scan() {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
close(eventsChannel)
return
default:
line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
}
// Reset eventType and data for the next event
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
// Extract event type from the "event" field
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
// Extract data from the "data" field
data = d
}
}
}
if err := scanner.Err(); err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
}
}
}
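
For reference, a usage sketch for the deleted client above: subscribe from a goroutine and drain the channel. It assumes the names in this file (NewEventStream, Event, EventHead, EventConnectionError, the package-level log); the host URL and the consumeHeads wrapper are placeholders:

```go
func consumeHeads(ctx context.Context) error {
	stream, err := NewEventStream(ctx, http.DefaultClient, "http://localhost:3500", []string{EventHead})
	if err != nil {
		return err // invalid host URL or empty topic list
	}
	ch := make(chan *Event, 1)
	go stream.Subscribe(ch)
	for ev := range ch { // Subscribe closes ch when ctx is canceled
		if ev.EventType == EventConnectionError {
			log.Error(string(ev.Data))
			continue
		}
		log.WithField("type", ev.EventType).Info(string(ev.Data))
	}
	return nil
}
```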

View File

@@ -1,80 +0,0 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNewEventStream(t *testing.T) {
validURL := "http://localhost:8080"
invalidURL := "://invalid"
topics := []string{"topic1", "topic2"}
tests := []struct {
name string
host string
topics []string
wantErr bool
}{
{"Valid input", validURL, topics, false},
{"Invalid URL", invalidURL, topics, true},
{"No topics", validURL, []string{}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
}
})
server := httptest.NewServer(mux)
defer server.Close()
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)
// Collect events
var events []*Event
for len(events) != 2 {
select {
case event := <-eventsChannel:
log.Info(event)
events = append(events, event)
}
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
}
}
}

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToggleMultipleTimes(t *testing.T) {
func TestToogleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToggleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToggleAfterOverflow(t *testing.T) {
func TestToogleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}

View File

@@ -20,7 +20,6 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -220,9 +219,12 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
}
// delete removes the given case from cs.
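
The two find variants above behave identically: slices.IndexFunc, in the standard library since Go 1.21, returns the index of the first element for which the predicate reports true, or -1 when none matches, exactly like the hand-rolled loop. A standalone illustration:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	words := []string{"alpha", "beta", "gamma"}
	// Index of the first four-letter word, or -1 if there is none.
	i := slices.IndexFunc(words, func(s string) bool { return len(s) == 4 })
	fmt.Println(i) // 1 ("beta")
}
```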

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
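
The hunk cuts off after the first step of the calculation, an even split across GOMAXPROCS. A sketch of how such a computation can be completed; the round-up and the floor of 1 are illustrative assumptions, since the rest of the function is not shown in this diff:

```go
package main

import (
	"fmt"
	"runtime"
)

// chunkSize starts from the even split shown above; rounding up and
// the minimum of 1 are assumptions, not the real function body.
func chunkSize(items int) int {
	procs := runtime.GOMAXPROCS(0) // current parallelism setting
	size := items / procs
	if items%procs != 0 {
		size++ // round up so every item lands in some chunk
	}
	if size < 1 {
		size = 1
	}
	return size
}

func main() {
	fmt.Printf("%d items -> chunks of %d\n", 1000, chunkSize(1000))
}
```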

View File

@@ -2,7 +2,7 @@
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more hermetic build.
# cached actions with a more heremtic build.
env -i \
PATH=/usr/bin:/bin \

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -398,6 +399,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -554,9 +563,3 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
func (s *Service) inRegularSync() bool {
return s.cfg.SyncChecker.Synced()
}
// validating returns true if the beacon is tracking some validators that have
// registered for proposing.
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}

View File

@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}

View File

@@ -82,20 +82,19 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {

View File

@@ -18,63 +18,17 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Try if we have already set the checkpoint cache
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil
}
if cachedState != nil && !cachedState.IsNil() {
return cachedState
}
st, err := s.HeadState(ctx)
if err != nil {
return nil
}
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
if err != nil {
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
}
return st
}
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
if st := s.getRecentPreState(ctx, c); st != nil {
return st, nil
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
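
The multilock mentioned above serializes callers per checkpoint: keying the lock on string(c.Root) + epochKey means that, for a given (root, epoch) pair, exactly one goroutine runs the expensive ProcessSlotsUsingNextSlotCache call while the others wait and then hit the checkpoint-state cache. async.Multilock is Prysm-internal; a stand-in sketch of the same keyed-lock idea with stdlib pieces only:

```go
package keyedlock

import "sync"

// Keyed hands out one mutex per key, so work for different checkpoints
// never contends while work for the same checkpoint is serialized.
type Keyed struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

// Get returns the mutex for key, creating it on first use.
func (k *Keyed) Get(key string) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	if k.locks == nil {
		k.locks = make(map[string]*sync.Mutex)
	}
	l, ok := k.locks[key]
	if !ok {
		l = &sync.Mutex{}
		k.locks[key] = l
	}
	return l
}
```

Held across the check-cache / compute / fill-cache sequence, such a lock makes the whole sequence atomic per checkpoint without blocking unrelated checkpoints.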

View File

@@ -146,28 +146,6 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -559,20 +558,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -586,20 +571,11 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
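
The removed block above is a disarm-on-completion timer: time.AfterFunc arms a callback for the next slot boundary, and the deferred nst.Stop() cancels it when the DA check finishes in time, so the error logs only when sidecars are genuinely still missing at slot end. The same pattern in isolation (names are hypothetical):

```go
package main

import (
	"log"
	"time"
)

// warnAt arms a one-shot warning for the deadline and returns a disarm
// function, mirroring the `defer nst.Stop()` in the removed code.
func warnAt(deadline time.Time, stillMissing func() bool) (disarm func() bool) {
	if !deadline.After(time.Now()) {
		// Deadline already passed; never arm. The removed code likewise
		// skips logging when called after the next slot has started.
		return func() bool { return false }
	}
	t := time.AfterFunc(time.Until(deadline), func() {
		if stillMissing() {
			log.Println("Still waiting for DA check at slot end")
		}
	})
	return t.Stop
}
```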

View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Forkchoice.
// Foprkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -95,9 +95,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {

View File

@@ -170,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, postState)
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called on the background
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,17 +451,8 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{

View File

@@ -8,14 +8,12 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -380,38 +378,3 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

View File

@@ -290,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -109,6 +109,10 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
@@ -122,6 +126,10 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return
}
c.lock.Lock()
defer c.lock.Unlock()

View File

@@ -2,7 +2,6 @@ package cache_test
import (
"context"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -36,28 +35,3 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
c.Disable()
require.NoError(t, c.MarkInProgress(r))
c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Get call will only terminate when
// it is not longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()
}

View File

@@ -41,9 +41,3 @@ func (t *TrackedValidatorsCache) Prune() {
defer t.Unlock()
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
}
func (t *TrackedValidatorsCache) Validating() bool {
t.Lock()
defer t.Unlock()
return len(t.trackedValidators) > 0
}

View File

@@ -14,7 +14,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
var (
@@ -132,6 +133,9 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// In the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
return err
}
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
if err != nil {
return err
@@ -178,15 +182,24 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
if attEpoch+1 < currentEpoch {
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch",
attEpoch,
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}
return nil
}
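
Both sides of this hunk implement the same EIP-7045 acceptance rule, attestations from the current or previous epoch, and differ only in how they dodge unsigned underflow at epoch 0: one clamps currentEpoch - 1 through SafeSub, the other rewrites the check as attEpoch+1 < currentEpoch so no subtraction occurs. A self-contained sketch of the underflow hazard, where SafeSub is a local stand-in for the primitives.Epoch helper:

```go
package main

import "fmt"

type Epoch uint64

// SafeSub reports an error instead of wrapping around when the
// subtraction would underflow, like the helper called above.
func (e Epoch) SafeSub(x uint64) (Epoch, error) {
	if uint64(e) < x {
		return 0, fmt.Errorf("epoch underflow: %d - %d", e, x)
	}
	return e - Epoch(x), nil
}

func main() {
	current := Epoch(0)
	prev, err := current.SafeSub(1) // raw uint64 math would wrap to 2^64-1
	if err != nil {
		prev = 0 // clamp to epoch 0, as the handler above does
	}
	att := Epoch(0)
	fmt.Println(att == current || att == prev) // accepted: true
	fmt.Println(att+1 >= current)              // equivalent guard, no subtraction
}
```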

View File

@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
},
{
name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
attSlot: 1 << 32,
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
wantedErr: "which exceeds max allowed value relative to the local clock",
},
}
for _, tt := range tests {

View File

@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, after which this overflows uint64.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
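The "~10B ETH" ceiling in the comment is plain uint64 arithmetic: balances are tracked in Gwei (1 ETH = 10^9 Gwei), and the largest uint64 is 2^64 - 1 ≈ 1.84 × 10^19 Gwei ≈ 1.84 × 10^10 ETH, so a summed balance approaching roughly 18 billion ETH would overflow; ~10B ETH states that bound with headroom.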

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.

View File

@@ -24,7 +24,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -58,9 +57,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
defer span.End()
var err error
interop.WriteBlockToDisk(signed, false /* Has the block failed */)
interop.WriteStateToDisk(st)
parentRoot := signed.Block().ParentRoot()
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
if err != nil {

View File

@@ -94,15 +94,6 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -60,12 +59,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
e.diskSummary = sum
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
}
// stash adds an item to the in-memory cache of BlobSidecars.
@@ -87,17 +81,9 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, kc.count())
scs := make([]blocks.ROBlob, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob, we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -111,7 +97,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs = append(scs, *e.scs[i])
scs[i] = *e.scs[i]
}
return scs, nil

View File

@@ -3,14 +3,9 @@ package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestCacheEnsureDelete(t *testing.T) {
@@ -28,145 +23,3 @@ func TestCacheEnsureDelete(t *testing.T) {
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
entry := &cacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
expected = append(expected, blobs[i])
require.NoError(t, entry.stash(&blobs[i]))
}
require.Equal(t, numExpected, len(expected))
return entry, commits, expected
}
}
func TestFilterDiskSummary(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup filterTestCaseSetupFunc
}{
{
name: "full blobs, all on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
},
{
name: "full blobs, first on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
},
{
name: "full blobs, middle on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
},
{
name: "full blobs, last on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
},
{
name: "full blobs, none on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
},
{
name: "one commitment, on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
},
{
name: "one commitment, not on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
},
{
name: "two commitments, first on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
},
{
name: "two commitments, last on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
},
{
name: "two commitments, none on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
},
{
name: "two commitments, all on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}
func TestFilter(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
},
err: errCommitmentMismatch,
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
},
err: errMissingSidecar,
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
},
err: errCommitmentMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}


@@ -4,10 +4,9 @@ go_library(
name = "go_default_library",
srcs = [
"blob.go",
"cache.go",
"ephemeral.go",
"log.go",
"metrics.go",
"mock.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -34,7 +33,6 @@ go_test(
name = "go_default_test",
srcs = [
"blob_test.go",
"cache_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],


@@ -1,9 +1,7 @@
package filesystem
import (
"context"
"fmt"
"math"
"os"
"path"
"strconv"
@@ -105,29 +103,12 @@ func (bs *BlobStorage) WarmCache() {
return
}
go func() {
start := time.Now()
if err := bs.pruner.warmCache(); err != nil {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fall back when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
if bs == nil || bs.pruner == nil {
return nil, ErrBlobStorageSummarizerUnavailable
}
return bs.pruner.waitForCache(ctx)
}
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
startTime := time.Now()
@@ -300,15 +281,6 @@ func (bs *BlobStorage) Clear() error {
return nil
}
// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
if requested > math.MaxUint64-bs.retentionEpochs {
// If there is an overflow, then the retention period was set to an extremely large number.
return true
}
return requested+bs.retentionEpochs >= current
}
type blobNamer struct {
root [32]byte
index uint64
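The deleted WithinRetentionPeriod helper above guards against unsigned overflow before comparing epochs: if requested + retentionEpochs would wrap around uint64, the retention period is effectively infinite and everything is retained. A self-contained sketch of the same arithmetic, using plain uint64 in place of primitives.Epoch:

```go
package main

import (
	"fmt"
	"math"
)

// withinRetention mirrors the removed method: epochs are uint64 values, so
// requested+retention can wrap; the guard treats a would-be overflow as
// "retain everything".
func withinRetention(requested, current, retention uint64) bool {
	if requested > math.MaxUint64-retention {
		return true
	}
	return requested+retention >= current
}

func main() {
	fmt.Println(withinRetention(0, 17, 16))            // false: outside the window
	fmt.Println(withinRetention(0, 16, 16))            // true: right on the boundary
	fmt.Println(withinRetention(1, 1, math.MaxUint64)) // true: guard short-circuits the overflow
}
```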


@@ -2,7 +2,6 @@ package filesystem
import (
"bytes"
"math"
"os"
"path"
"sync"
@@ -249,50 +248,3 @@ func TestNewBlobStorage(t *testing.T) {
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}
func TestConfig_WithinRetentionPeriod(t *testing.T) {
retention := primitives.Epoch(16)
storage := &BlobStorage{retentionEpochs: retention}
cases := []struct {
name string
requested primitives.Epoch
current primitives.Epoch
within bool
}{
{
name: "before",
requested: 0,
current: retention + 1,
within: false,
},
{
name: "same",
requested: 0,
current: 0,
within: true,
},
{
name: "boundary",
requested: 0,
current: retention,
within: true,
},
{
name: "one less",
requested: retention - 1,
current: retention,
within: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
})
}
t.Run("overflow", func(t *testing.T) {
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
})
}


@@ -1,119 +0,0 @@
package filesystem
import (
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
slot primitives.Slot
mask blobIndexMask
}
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
}
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
if !s.mask[i] {
return false
}
}
return true
}
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
Summary(root [32]byte) BlobStorageSummary
}
type blobStorageCache struct {
mu sync.RWMutex
nBlobs float64
cache map[string]BlobStorageSummary
}
var _ BlobStorageSummarizer = &blobStorageCache{}
func newBlobStorageCache() *blobStorageCache {
return &blobStorageCache{
cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
k := rootString(root)
s.mu.RLock()
defer s.mu.RUnlock()
return s.cache[k]
}
func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *blobStorageCache) evict(key string) {
var deleted float64
s.mu.Lock()
v, ok := s.cache[key]
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
}
delete(s.cache, key)
s.mu.Unlock()
if deleted > 0 {
s.updateMetrics(-deleted)
}
}
func (s *blobStorageCache) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
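Everything this deleted file did reduces to a fixed-size boolean mask per block root. A stripped-down sketch of the two read paths, HasIndex and AllAvailable, with MaxBlobsPerBlock hard-coded to 6 (its Deneb value) instead of importing fieldparams:

```go
package main

import "fmt"

const maxBlobsPerBlock = 6 // stand-in for fieldparams.MaxBlobsPerBlock

type blobIndexMask [maxBlobsPerBlock]bool

type summary struct{ mask blobIndexMask }

// HasIndex clamps out-of-range lookups to false rather than panicking.
func (s summary) HasIndex(idx uint64) bool {
	if idx >= maxBlobsPerBlock {
		return false
	}
	return s.mask[idx]
}

// AllAvailable reports whether indices 0..count-1 are all present.
func (s summary) AllAvailable(count int) bool {
	if count > maxBlobsPerBlock {
		return false
	}
	for i := 0; i < count; i++ {
		if !s.mask[i] {
			return false
		}
	}
	return true
}

func main() {
	var s summary
	s.mask[0], s.mask[1] = true, true
	fmt.Println(s.HasIndex(1))     // true
	fmt.Println(s.HasIndex(99))    // false: out of range is safe
	fmt.Println(s.AllAvailable(2)) // true: 0 and 1 are present
	fmt.Println(s.AllAvailable(3)) // false: 2 is missing
}
```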


@@ -1,150 +0,0 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
for i := range allSet {
allSet[i] = true
}
cases := []struct {
name string
root [32]byte
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: &noneSet,
},
{
name: "index 1 set",
expected: &oneSet,
},
{
name: "all set",
expected: &allSet,
},
{
name: "first set",
expected: &firstSet,
},
{
name: "last set",
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
}
}
func TestAllAvailable(t *testing.T) {
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
r[i] = i
}
return r
}
require.DeepEqual(t, []int{}, idxUpTo(0))
require.DeepEqual(t, []int{0}, idxUpTo(1))
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
cases := []struct {
name string
idxSet []int
count int
aa bool
}{
{
// If there are no blobs committed, then all the committed blobs are available.
name: "none in idx, 0 arg",
count: 0,
aa: true,
},
{
name: "none in idx, 1 arg",
count: 1,
aa: false,
},
{
name: "first in idx, 1 arg",
idxSet: []int{0},
count: 1,
aa: true,
},
{
name: "second in idx, 1 arg",
idxSet: []int{1},
count: 1,
aa: false,
},
{
name: "first missing, 2 arg",
idxSet: []int{1},
count: 2,
aa: false,
},
{
name: "all missing, 1 arg",
count: 6,
aa: false,
},
{
name: "out of bound is safe",
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
name: "one present",
count: 1,
idxSet: idxUpTo(1),
aa: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}
sum := BlobStorageSummary{mask: mask}
require.Equal(t, c.aa, sum.AllAvailable(c.count))
})
}
}


@@ -12,7 +12,7 @@ import (
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -23,7 +23,7 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -61,15 +61,3 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
t.Fatal(err)
}
}
}
return c
}


@@ -1,7 +1,6 @@
package filesystem
import (
"context"
"encoding/binary"
"io"
"path"
@@ -13,6 +12,7 @@ import (
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,39 +32,22 @@ type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
cache *blobStorageCache
cacheReady chan struct{}
warmed bool
slotMap *slotForRoot
fs afero.Fs
}
type prunerOpt func(*blobPruner) error
func withWarmedCache() prunerOpt {
return func(p *blobPruner) error {
return p.warmCache()
}
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
cw := make(chan struct{})
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
for _, o := range opts {
if err := o(p); err != nil {
return nil, err
}
}
return p, nil
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
}
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
return err
}
pruned := uint64(windowMin(latest, p.windowSize))
@@ -72,8 +55,6 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
@@ -81,7 +62,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
func windowMin(latest, offset primitives.Slot) primitives.Slot {
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
@@ -90,32 +71,12 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {
return latest - offset
}
func (p *blobPruner) warmCache() error {
p.Lock()
defer p.Unlock()
if err := p.prune(0); err != nil {
return err
}
if !p.warmed {
p.warmed = true
close(p.cacheReady)
}
return nil
}
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
select {
case <-p.cacheReady:
return p.cache, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -161,7 +122,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.cache.slot(root)
slot, slotCached := p.slotMap.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
@@ -190,7 +151,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
if err != nil {
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
}
if err := p.cache.ensure(root, slot, idx); err != nil {
if err := p.slotMap.ensure(root, slot, idx); err != nil {
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
}
}
@@ -218,7 +179,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.cache.evict(rootFromDir(dir))
p.slotMap.evict(rootFromDir(dir))
return len(scFiles), nil
}
@@ -308,3 +269,71 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotCacheEntry struct {
slot primitives.Slot
mask [fieldparams.MaxBlobsPerBlock]bool
}
type slotForRoot struct {
sync.RWMutex
nBlobs float64
cache map[string]*slotCacheEntry
}
func (s *slotForRoot) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
if !ok {
v = &slotCacheEntry{}
}
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
var deleted float64
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
s.updateMetrics(-deleted)
}
delete(s.cache, key)
}
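windowMin, shown near the top of this file's diff, snaps the latest slot down to the first slot of its epoch and then subtracts the retention window. A worked sketch with mainnet's 32 slots per epoch (the clamp branch is cut off by the hunk above, so treating it as `return 0` is an assumption):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, hard-coded for the sketch

func windowMin(latest, offset uint64) uint64 {
	// First slot of the epoch containing `latest`.
	latest = latest - latest%slotsPerEpoch
	if latest < offset {
		return 0 // assumed clamp; this branch is elided in the hunk
	}
	return latest - offset
}

func main() {
	// Slot 10000 is in the epoch starting at slot 9984; with a 4096-slot
	// window, anything before slot 5888 is eligible for pruning.
	fmt.Println(windowMin(10000, 4096)) // 5888
	// Early in the chain the window covers everything.
	fmt.Println(windowMin(100, 4096)) // 0
}
```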


@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
root := fmt.Sprintf("%#x", sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -45,7 +45,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -63,7 +63,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.cache.slot(root)
cs, cok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
@@ -95,12 +95,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.cache.slot(root)
cs, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.cache.evict(root)
_, ok = bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
@@ -119,7 +119,7 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
// Set slot equal to the window size, so it should be retained.
slot := bs.pruner.windowSize
var slot primitives.Slot = bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -129,8 +129,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
@@ -243,8 +243,10 @@ func TestListDir(t *testing.T) {
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children,
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
@@ -280,7 +282,10 @@ func TestListDir(t *testing.T) {
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
return s == notABlob.name
if s == notABlob.name {
return true
}
return false
},
},
{


@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
type SlasherDatabase interface {
io.Closer
SaveLastEpochWrittenForValidators(
SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
) error
SaveAttestationRecordsForValidators(


@@ -20,7 +20,6 @@ go_library(
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
"migration_finalized_parent.go",
"migration_state_validators.go",
"schema.go",
"state.go",


@@ -201,19 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc
}
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
encs[lastIdx] = enc
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
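Both sides of this hunk build the same structure, differing only in where the last element is handled: each index entry links a root to its parent and to its child, and only the batch's final entry receives the externally supplied finalizedChildRoot. A toy sketch of that linkage, with strings in place of 32-byte roots and a plain struct in place of the FinalizedBlockRootContainer protobuf:

```go
package main

import "fmt"

// container mirrors the two fields the backfill loop fills in.
type container struct {
	ParentRoot, ChildRoot string
}

func buildIndex(roots, parents []string, finalizedChild string) map[string]container {
	idx := make(map[string]container, len(roots))
	for i, r := range roots {
		c := container{ParentRoot: parents[i]}
		if i == len(roots)-1 {
			// Only the last entry points at the finalized child that the
			// caller vouches for; the db transaction verifies this link.
			c.ChildRoot = finalizedChild
		} else {
			c.ChildRoot = roots[i+1]
		}
		idx[r] = c
	}
	return idx
}

func main() {
	roots := []string{"r0", "r1", "r2"}
	parents := []string{"genesis", "r0", "r1"}
	for _, r := range roots {
		c := buildIndex(roots, parents, "C")[r]
		fmt.Printf("%s: parent=%s child=%s\n", r, c.ParentRoot, c.ChildRoot)
	}
}
```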


@@ -237,50 +237,6 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)
// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]
// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]
// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))
// make sure that we still correctly validate descendants in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)
// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -296,23 +252,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendant root that really doesn't exist
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds


@@ -14,7 +14,6 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateFinalizedParent,
}
// RunMigrations defined in the migrations array.


@@ -1,87 +0,0 @@
package kv
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
)
var migrationFinalizedParent = []byte("parent_bug_32fb183")
func migrateFinalizedParent(ctx context.Context, db *bolt.DB) error {
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationFinalizedParent); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if bkt == nil {
return fmt.Errorf("unable to read %s bucket for migration", finalizedBlockRootsIndexBucket)
}
bb := tx.Bucket(blocksBucket)
if bb == nil {
return fmt.Errorf("unable to read %s bucket for migration", blocksBucket)
}
c := bkt.Cursor()
var slotsWithoutBug primitives.Slot
maxBugSearch := params.BeaconConfig().SlotsPerEpoch * 10
for k, v := c.Last(); k != nil; k, v = c.Prev() {
// check if context is cancelled in between
if ctx.Err() != nil {
return ctx.Err()
}
idxEntry := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, v, idxEntry); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", k)
}
// Not one of the corrupt values
if !bytes.Equal(idxEntry.ParentRoot, k) {
slotsWithoutBug += 1
if slotsWithoutBug > maxBugSearch {
break
}
continue
}
slotsWithoutBug = 0
log.WithField("root", fmt.Sprintf("%#x", k)).Debug("found index entry with incorrect parent root")
// Look up full block to get the correct parent root.
encBlk := bb.Get(k)
if encBlk == nil {
return errors.Wrapf(ErrNotFound, "could not find block for corrupt finalized index entry %#x", k)
}
blk, err := unmarshalBlock(ctx, encBlk)
if err != nil {
return errors.Wrapf(err, "unable to decode block for root=%#x", k)
}
// Replace parent root in the index with the correct value and write it back.
pr := blk.Block().ParentRoot()
idxEntry.ParentRoot = pr[:]
idxEnc, err := encode(ctx, idxEntry)
if err != nil {
return errors.Wrapf(err, "failed to encode finalized index entry for root=%#x", k)
}
if err := bkt.Put(k, idxEnc); err != nil {
return errors.Wrapf(err, "failed to update finalized index entry for root=%#x", k)
}
log.WithField("root", fmt.Sprintf("%#x", k)).
WithField("parentRoot", fmt.Sprintf("%#x", idxEntry.ParentRoot)).
Debug("updated corrupt index entry with correct parent")
}
// Mark migration complete.
return mb.Put(migrationFinalizedParent, migrationCompleted)
}); updateErr != nil {
log.WithError(updateErr).Errorf("could not run finalized parent root index repair migration")
return updateErr
}
return nil
}
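The scan in this deleted migration is bounded by a heuristic rather than by walking the whole index: it iterates newest-to-oldest, repairs corrupt entries, and gives up once it has seen maxBugSearch consecutive clean entries (ten epochs' worth of slots). A small sketch of that early-exit pattern:

```go
package main

import "fmt"

// scanUntilClean walks entries newest-to-oldest, counts repairs, and stops
// once a long enough run of clean entries suggests the bug window is past.
func scanUntilClean(corrupt []bool, maxCleanRun int) (fixed int) {
	cleanRun := 0
	for i := len(corrupt) - 1; i >= 0; i-- {
		if !corrupt[i] {
			cleanRun++
			if cleanRun > maxCleanRun {
				break // assume everything older is unaffected
			}
			continue
		}
		cleanRun = 0
		fixed++ // the real migration rewrites the index entry here
	}
	return fixed
}

func main() {
	corrupt := make([]bool, 100)
	corrupt[5] = true // ancient corruption, beyond the search horizon
	fmt.Println(scanUntilClean(corrupt, 5)) // 0: scan stops long before index 5
	corrupt[95] = true // recent corruption
	fmt.Println(scanUntilClean(corrupt, 5)) // 1: repaired before the scan gives up
}
```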


@@ -70,12 +70,12 @@ func (s *Store) LastEpochWrittenForValidators(
return attestedEpochs, err
}
// SaveLastEpochWrittenForValidators saves the latest epoch
// that each validator has attested to in the provided map.
func (s *Store) SaveLastEpochWrittenForValidators(
// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochWrittenForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
const batchSize = 10000
@@ -157,7 +157,7 @@ func (s *Store) CheckAttesterDoubleVotes(
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
// Check if there is signing root in the database for this combination
@@ -166,7 +166,7 @@ func (s *Store) CheckAttesterDoubleVotes(
validatorEpochKey := append(encEpoch, encIdx...)
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
// An attestation record key consists of a signing root (32 bytes).
// An attestation record key is comprised of a signing root (32 bytes).
if len(attRecordsKey) < attestationRecordKeySize {
// If there is no signing root for this combination,
// then there is no double vote. We can continue to the next validator.
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}
// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}
// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < rootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))
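The encode/decode comments in this hunk describe a simple framing: a fixed 32-byte signing root followed by the compressed attestation record, with the length check above guarding the split. A stripped-down sketch of that framing (compression omitted for brevity):

```go
package main

import (
	"errors"
	"fmt"
)

const rootSize = 32

// encodeRecord prepends the 32-byte signing root to the record payload.
func encodeRecord(root [rootSize]byte, payload []byte) []byte {
	return append(root[:], payload...)
}

// decodeRecord splits the frame, rejecting inputs shorter than the root.
func decodeRecord(enc []byte) ([rootSize]byte, []byte, error) {
	var root [rootSize]byte
	if len(enc) < rootSize {
		return root, nil, errors.New("encoded record shorter than signing root")
	}
	copy(root[:], enc[:rootSize])
	return root, enc[rootSize:], nil
}

func main() {
	var root [rootSize]byte
	root[0] = 0xaa
	enc := encodeRecord(root, []byte("attestation bytes"))
	r, payload, err := decodeRecord(enc)
	fmt.Println(r[0], string(payload), err) // 170 attestation bytes <nil>
}
```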


@@ -89,7 +89,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, len(attestedEpochs))
err = beaconDB.SaveLastEpochWrittenForValidators(ctx, epochsByValidator)
err = beaconDB.SaveLastEpochsWrittenForValidators(ctx, epochsByValidator)
require.NoError(t, err)
retrievedEpochs, err := beaconDB.LastEpochWrittenForValidators(ctx, indices)


@@ -10,7 +10,7 @@ import (
)
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
// also tests the interchangeability of the explicit prometheus Register/Unregister
// also tests the interchangability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}
// TestCancellation tests that canceling the context passed into
// TestCancelation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
func TestCancellation(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")


@@ -7,11 +7,9 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -129,16 +127,51 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
if err := configureTracing(cliCtx); err != nil {
return nil, err
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -158,10 +191,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
}
beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -170,8 +203,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
beacon.forkChoicer = doublylinkedtree.New()
depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
@@ -187,29 +220,112 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.BlobStorage = blobs
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
return nil, errors.Wrap(err, "backfill status initialization error")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
}
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
}
// db.DatabasePath is the path to the containing directory
@@ -227,170 +343,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return beacon, nil
}
func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}
if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}
if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}
if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}
return nil
}
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -479,86 +431,40 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return err
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
if err := d.RunMigrations(b.ctx); err != nil {
@@ -567,6 +473,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
b.db = d
var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
@@ -581,17 +488,16 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
return err
}
if b.CheckpointInitializer != nil {
@@ -600,11 +506,23 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
}
log.WithField("address", depositAddress).Info("Deposit contract")
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())
return nil
}
@@ -692,32 +610,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return errors.Wrapf(err, "could not register p2p service")
return err
}
svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -1059,13 +976,11 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)
selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
@@ -1159,9 +1074,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
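A recurring difference across this file's hunks is bare `return err` versus `errors.Wrap(err, ...)`. Wrapping adds call-site context to the message while keeping the original error matchable with errors.Is. A small sketch, assuming github.com/pkg/errors v0.9+ (the first version whose Wrap implements Unwrap):

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var errNoGenesis = stderrors.New("no genesis block")

func startStateGen() error { return errNoGenesis }

func start() error {
	if err := startStateGen(); err != nil {
		// Wrapping annotates the failure path without hiding the cause.
		return errors.Wrap(err, "could not start state generation")
	}
	return nil
}

func main() {
	err := start()
	fmt.Println(err)                             // could not start state generation: no genesis block
	fmt.Println(stderrors.Is(err, errNoGenesis)) // true: the sentinel still matches
}
```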


@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
want bool
}{
{
name: "Holesky testnet",
networkName: features.HoleskyTestnet.Name,
networkValue: "holesky",
name: "Prater testnet",
networkName: features.PraterTestnet.Name,
networkValue: "prater",
want: true,
},
{


@@ -11,7 +11,6 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",


@@ -4,7 +4,6 @@ import (
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/urfave/cli/v2"
@@ -32,9 +31,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
err = errors.Errorf(
"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
cmd.DataDirFlag.Name,
log.Fatal(
"Could not determine your system's HOME path, please specify a --datadir you wish " +
"to use for your chain data",
)
}
}


@@ -49,7 +49,6 @@ go_test(
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneAttsPool prunes attestations pool on every slot interval.
@@ -67,18 +66,7 @@ func (s *Service) pruneExpiredAtts() {
// Return true if the input slot has expired.
// Expired is defined as one epoch behind the current time.
func (s *Service) expired(providedSlot primitives.Slot) bool {
providedEpoch := slots.ToEpoch(providedSlot)
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch < params.BeaconConfig().DenebForkEpoch {
return s.expiredPreDeneb(providedSlot)
}
return providedEpoch+1 < currEpoch
}
// Handles expiration of attestations before deneb.
func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
func (s *Service) expired(slot primitives.Slot) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + uint64(expirationSlot.Mul(params.BeaconConfig().SecondsPerSlot))
currentTime := uint64(prysmTime.Now().Unix())
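
The hunk above gives the pool two expiry rules: before Deneb an attestation expires SlotsPerEpoch slots after its own slot (a wall-clock comparison), while from Deneb onward it stays valid through the epoch after its own, i.e. it expires once providedEpoch+1 < currentEpoch. A worked sketch under assumed mainnet parameters (32 slots per epoch, 12s slots); illustrative, not Prysm code:

```go
package main

import "fmt"

const slotsPerEpoch, secondsPerSlot = 32, 12

// Deneb rule: expired once more than one epoch behind the current epoch.
func expiredDeneb(providedSlot, currSlot uint64) bool {
	return providedSlot/slotsPerEpoch+1 < currSlot/slotsPerEpoch
}

// Pre-Deneb rule: expired one epoch's worth of slots after the attestation slot.
func expiredPreDeneb(genesisTime, providedSlot, nowUnix uint64) bool {
	expirationSlot := providedSlot + slotsPerEpoch
	return genesisTime+expirationSlot*secondsPerSlot < nowUnix
}

func main() {
	// At slot 100 (epoch 3): epoch-2 attestations survive, epoch-1 ones do not.
	fmt.Println(expiredDeneb(64, 100))       // false: 2+1 < 3 is false
	fmt.Println(expiredDeneb(32, 100))       // true:  1+1 < 3
	fmt.Println(expiredPreDeneb(0, 0, 1000)) // true: slot 0 expired 384s after genesis
}
```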

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -128,22 +127,3 @@ func TestPruneExpired_Expired(t *testing.T) {
assert.Equal(t, true, s.expired(0), "Should be expired")
assert.Equal(t, false, s.expired(1), "Should not be expired")
}
func TestPruneExpired_ExpiredDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = 3
params.OverrideBeaconConfig(cfg)
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)
// Rewind back 4 epochs + 10 slots worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - (4*uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) + 10)
secondEpochStart := primitives.Slot(2 * uint64(params.BeaconConfig().SlotsPerEpoch))
thirdEpochStart := primitives.Slot(3 * uint64(params.BeaconConfig().SlotsPerEpoch))
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}

View File

@@ -90,12 +90,10 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -10,13 +10,11 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -70,7 +68,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastAttestation(ctx, subnet, att, forkDigest)
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
return nil
}
@@ -96,8 +94,8 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -138,11 +136,8 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
return
}
@@ -223,13 +218,13 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastBlob(ctx, subnet, blob, forkDigest)
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
return nil
}
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
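
One detail in the broadcast hunks worth calling out: the goroutine re-roots its context with trace.NewContext(context.Background(), span), keeping the OpenCensus span but shedding the caller's cancellation and deadline, so a gossip publish outlives the RPC that triggered it. A condensed sketch of that pattern (a hypothetical helper, not the actual Prysm code):

```go
package broadcast

import (
	"context"

	"go.opencensus.io/trace"
)

// fireAndForget demonstrates the detach-the-deadline idiom.
func fireAndForget(ctx context.Context, publish func(context.Context) error) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcast")
	go func() {
		defer span.End()
		// Background context + original span: tracing survives, but the
		// caller's deadline no longer applies to the publish.
		detached := trace.NewContext(context.Background(), span)
		_ = publish(detached)
	}()
}
```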

View File

@@ -240,8 +240,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
var hosts []host.Host
// setup other nodes.
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {

View File

@@ -12,28 +12,28 @@ const defaultPubsubQueueSize = 600
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
QUICPort uint
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
BootstrapNodeAddr []string
Discv5BootStrapAddr []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
}
// validateConfig validates whether the values provided are accurate and will set
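
Net effect of the Config hunk above: the duplicated BootstrapNodeAddr/Discv5BootStrapAddr pair collapses into a single Discv5BootStrapAddrs slice, and a QUICPort joins TCPPort/UDPPort. Roughly, the newer shape is populated like this (all values are placeholders):

```go
package main

import p2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"

func main() {
	_ = &p2p.Config{
		Discv5BootStrapAddrs: []string{"enr:-KG4..."}, // placeholder ENR
		TCPPort:              13000,
		UDPPort:              12000,
		QUICPort:             13000, // new alongside TCP/UDP
		MaxPeers:             70,
	}
}
```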

View File

@@ -25,7 +25,7 @@ const (
)
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
func (_ *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}
@@ -63,12 +63,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (*Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
func (_ *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
return true
}
// InterceptUpgraded tests whether a fully capable connection is allowed.
func (*Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
func (_ *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
@@ -35,16 +34,6 @@ type Listener interface {
LocalNode() *enode.LocalNode
}
const (
udp4 = iota
udp6
)
type quicProtocol uint16
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
@@ -73,14 +62,8 @@ func (s *Service) RefreshENR() {
// Compare current epoch with our fork epochs
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
switch {
case currEpoch < altairForkEpoch:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
default:
// Altair Behaviour
case currEpoch >= altairForkEpoch:
// Retrieve sync subnets from application level
// cache.
bitS := bitfield.Bitvector4{byte(0x00)}
@@ -99,6 +82,13 @@ func (s *Service) RefreshENR() {
return
}
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
default:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
}
// ping all peers to inform them of new metadata
s.pingPeers()
@@ -106,15 +96,14 @@ func (s *Service) RefreshENR() {
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
iterator := s.dv5Listener.RandomNodes()
iterator = enode.Filter(iterator, s.filterPeer)
defer iterator.Close()
for {
// Exit if service's context is canceled.
// Exit if service's context is canceled
if s.ctx.Err() != nil {
break
}
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
@@ -122,22 +111,16 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}
if exists := iterator.Next(); !exists {
exists := iterator.Next()
if !exists {
break
}
node := iterator.Node()
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
}
if peerInfo == nil {
continue
}
// Make sure that peer is not dialed too often; for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
go func(info *peer.AddrInfo) {
@@ -157,9 +140,9 @@ func (s *Service) createListener(
// by default we will listen to all interfaces.
var bindIP net.IP
switch udpVersionFromIP(ipAddr) {
case udp4:
case "udp4":
bindIP = net.IPv4zero
case udp6:
case "udp6":
bindIP = net.IPv6zero
default:
return nil, errors.New("invalid ip provided")
@@ -177,10 +160,10 @@ func (s *Service) createListener(
IP: bindIP,
Port: int(s.cfg.UDPPort),
}
// Listen to all network interfaces
// for both ip protocols.
conn, err := net.ListenUDP("udp", udpAddr)
networkVersion := "udp"
conn, err := net.ListenUDP(networkVersion, udpAddr)
if err != nil {
return nil, errors.Wrap(err, "could not listen to UDP")
}
@@ -190,86 +173,24 @@ func (s *Service) createListener(
ipAddr,
int(s.cfg.UDPPort),
int(s.cfg.TCPPort),
int(s.cfg.QUICPort),
)
if err != nil {
return nil, errors.Wrap(err, "could not create local node")
}
bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
for _, addr := range s.cfg.Discv5BootStrapAddrs {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return nil, errors.Wrap(err, "could not bootstrap addr")
}
bootNodes = append(bootNodes, bootNode)
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
Bootnodes: bootNodes,
}
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
if err != nil {
return nil, errors.Wrap(err, "could not listen to discV5")
}
return listener, nil
}
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort, quicPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
return nil, errors.Wrap(err, "could not open node's peer database")
}
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
localNode.Set(ipEntry)
udpEntry := enr.UDP(udpPort)
localNode.Set(udpEntry)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(tcpEntry)
if features.Get().EnableQUIC {
quicEntry := quicProtocol(quicPort)
localNode.Set(quicEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
localNode = initializeSyncCommSubnets(localNode)
if s.cfg != nil && s.cfg.HostAddress != "" {
if s.cfg.HostAddress != "" {
hostIP := net.ParseIP(s.cfg.HostAddress)
if hostIP.To4() == nil && hostIP.To16() == nil {
return nil, errors.Errorf("invalid host address: %s", s.cfg.HostAddress)
log.Errorf("Invalid host address given: %s", hostIP.String())
} else {
localNode.SetFallbackIP(hostIP)
localNode.SetStaticIP(hostIP)
}
}
if s.cfg != nil && s.cfg.HostDNS != "" {
if s.cfg.HostDNS != "" {
host := s.cfg.HostDNS
ips, err := net.LookupIP(host)
if err != nil {
return nil, errors.Wrapf(err, "could not resolve host address: %s", host)
return nil, errors.Wrap(err, "could not resolve host address")
}
if len(ips) > 0 {
// Use first IP returned from the
@@ -278,8 +199,51 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(firstIP)
}
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
}
dv5Cfg.Bootnodes = []*enode.Node{}
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return nil, errors.Wrap(err, "could not bootstrap addr")
}
dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)
}
return localNode, nil
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
if err != nil {
return nil, errors.Wrap(err, "could not listen to discV5")
}
return listener, nil
}
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
return nil, errors.Wrap(err, "could not open node's peer database")
}
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
udpEntry := enr.UDP(udpPort)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(ipEntry)
localNode.Set(udpEntry)
localNode.Set(tcpEntry)
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
return initializeSyncCommSubnets(localNode), nil
}
func (s *Service) startDiscoveryV5(
@@ -298,68 +262,58 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
// 5. Peer's fork digest in their ENR matches that of
// 1. The local node is still actively looking for peers to
// connect to.
// 2. Peer has a valid IP and TCP port set in their enr.
// 3. Peer hasn't been marked as 'bad'
// 4. Peer is not currently active or connected.
// 5. Peer is ready to receive incoming connections.
// 6. Peer's fork digest in their ENR matches that of
// our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nil node entries passed in.
if node == nil {
return false
}
// Ignore nodes with no IP address stored.
// ignore nodes with no ip address stored.
if node.IP() == nil {
return false
}
peerData, multiAddrs, err := convertToAddrInfo(node)
// do not dial nodes with their tcp ports not set
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
return false
}
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
if peerData == nil || len(multiAddrs) == 0 {
return false
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) {
return false
}
// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
return false
}
// Ignore nodes that are already connected.
if s.host.Network().Connectedness(peerData.ID) == network.Connected {
return false
}
// Ignore nodes that are not ready to receive incoming connections.
if !s.peers.IsReadyToDial(peerData.ID) {
return false
}
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
// Decide whether or not to connect to a peer whose fork ENR data
// does not match our local node's.
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}
}
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
multiAddr := multiAddrs[0]
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
return true
}
@@ -400,16 +354,16 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
addr, err := convertToSingleMultiAddr(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, nodeAddrs...)
allAddrs = append(allAddrs, addr)
}
return allAddrs, nil
}
func ParseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
discv5Nodes, _ = parseGenericAddrs(addrs)
if len(discv5Nodes) == 0 {
log.Warn("No bootstrap addresses supplied")
@@ -439,139 +393,45 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
}
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
// Expect each node to have a TCP and a QUIC address.
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
var multiAddrs []ma.Multiaddr
for _, node := range nodes {
// Skip nodes with no ip address stored.
// ignore nodes with no ip address stored
if node.IP() == nil {
continue
}
// Get up to two multiaddrs (TCP and QUIC) for each node.
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
log.WithError(err).Error("Could not convert to multiAddr")
continue
}
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
multiAddrs = append(multiAddrs, multiAddr)
}
return multiAddrs
}
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
return nil, nil, err
}
if len(multiAddrs) == 0 {
return nil, nil, nil
}
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
return nil, nil, err
}
if len(infos) != 1 {
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
}
return &infos[0], multiAddrs, nil
return info, multiAddr, nil
}
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
// If the node has both a QUIC and a TCP port set in their ENR, then
// the multiaddr corresponding to the QUIC port is added first, followed
// by the multiaddr corresponding to the TCP port.
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
multiaddrs := make([]ma.Multiaddr, 0, 2)
// Retrieve the node public key.
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
// Compute the node ID from the public key.
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")
}
if features.Get().EnableQUIC {
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, quic)
if err != nil {
return nil, errors.Wrap(err, "could not get QUIC port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build QUIC address")
}
multiaddrs = append(multiaddrs, addr)
}
}
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, tcp)
if err != nil {
return nil, errors.Wrap(err, "could not get TCP port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build TCP address")
}
multiaddrs = append(multiaddrs, addr)
}
return multiaddrs, nil
}
// getPort retrieves the port for a given node and protocol, as well as a boolean
// indicating whether the port was found, and an error
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
var (
port uint
err error
)
switch protocol {
case tcp:
var entry enr.TCP
err = node.Load(&entry)
port = uint(entry)
case udp:
var entry enr.UDP
err = node.Load(&entry)
port = uint(entry)
case quic:
var entry quicProtocol
err = node.Load(&entry)
port = uint(entry)
default:
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
}
if enr.IsNotFound(err) {
return port, false, nil
}
if err != nil {
return 0, false, errors.Wrap(err, "could not get port")
}
return port, true, nil
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
}
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
@@ -589,14 +449,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
var ip4 enr.IPv4
var ip6 enr.IPv6
if node.Load(&ip4) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
}
addresses = append(addresses, address)
}
if node.Load(&ip6) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
}
@@ -623,9 +483,9 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}
func udpVersionFromIP(ipAddr net.IP) int {
func udpVersionFromIP(ipAddr net.IP) string {
if ipAddr.To4() != nil {
return udp4
return "udp4"
}
return udp6
return "udp6"
}
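
Two recurring idioms in this discovery.go diff deserve a concrete look: the getPort helper distinguishes "the peer simply doesn't advertise this transport" from "the record is broken" via enr.IsNotFound, and custom ENR keys like "quic" are just typed entries. A condensed sketch against go-ethereum's enode/enr API (not the full Prysm helper):

```go
package discovery

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// quicEntry mirrors the quicProtocol type above: its ENR key is "quic".
type quicEntry uint16

func (quicEntry) ENRKey() string { return "quic" }

// quicPort returns (port, advertised?, error), treating a missing entry
// as "not advertised" rather than a failure.
func quicPort(node *enode.Node) (uint, bool, error) {
	var entry quicEntry
	err := node.Load(&entry)
	if enr.IsNotFound(err) {
		return 0, false, nil
	}
	if err != nil {
		return 0, false, err
	}
	return uint(entry), true, nil
}
```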

View File

@@ -42,6 +42,10 @@ import (
var discoveryWaitTime = 1 * time.Second
func init() {
rand.Seed(time.Now().Unix())
}
func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
ip, err := prysmNetwork.ExternalIPv4()
require.NoError(t, err, "Could not get ip")
@@ -99,8 +103,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
for i := 1; i <= 5; i++ {
port = 3000 + i
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
@@ -130,107 +134,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}
}
func TestCreateLocalNode(t *testing.T) {
testCases := []struct {
name string
cfg *Config
expectedError bool
}{
{
name: "valid config",
cfg: nil,
expectedError: false,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
expectedError: true,
},
{
name: "valid host address",
cfg: &Config{HostAddress: "192.168.0.1"},
expectedError: false,
},
{
name: "invalid host DNS",
cfg: &Config{HostDNS: "invalid"},
expectedError: true,
},
{
name: "valid host DNS",
cfg: &Config{HostDNS: "www.google.com"},
expectedError: false,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
quicPort = 3000
)
// Create a private key.
address, privKey := createAddrAndPrivKey(t)
// Create a service.
service := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: tt.cfg,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
if tt.expectedError {
require.NotNil(t, err)
return
}
require.NoError(t, err)
expectedAddress := address
if tt.cfg != nil && tt.cfg.HostAddress != "" {
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
}
// Check IP.
// IP is not checked in case of DNS, since it can be resolved to different IPs.
if tt.cfg == nil || tt.cfg.HostDNS == "" {
ip := new(net.IP)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("ip", ip)))
require.Equal(t, true, ip.Equal(expectedAddress))
require.Equal(t, true, localNode.Node().IP().Equal(expectedAddress))
}
// Check UDP.
udp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("udp", udp)))
require.Equal(t, udpPort, localNode.Node().UDP())
// Check TCP.
tcp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("tcp", tcp)))
require.Equal(t, tcpPort, localNode.Node().TCP())
// Check fork is set.
fork := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
require.NotEmpty(t, *fork)
// Check att subnets.
attSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(attSubnetEnrKey, attSubnets)))
require.DeepSSZEqual(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, *attSubnets)
// Check sync committees subnets.
syncSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
})
}
}
func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
addr := net.ParseIP("invalidIP")
_, pkey := createAddrAndPrivKey(t)
@@ -238,7 +141,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
node, err := s.createLocalNode(pkey, addr, 0, 0)
require.NoError(t, err)
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -249,9 +152,8 @@ func TestMultiAddrConversion_OK(t *testing.T) {
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
cfg: &Config{
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
TCPPort: 0,
UDPPort: 0,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -408,12 +310,12 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
}
func TestCorrectUDPVersion(t *testing.T) {
assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, udp4, udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
// v4 in v6
assert.Equal(t, udp4, udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
}
// addPeer is a helper to add a peer with a given connection state.
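
The "v4 in v6" assertion above holds because net.IP.To4 unwraps IPv4-mapped IPv6 addresses (::ffff:a.b.c.d), so udpVersionFromIP classifies them as IPv4. A tiny demonstration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	mapped := net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}
	fmt.Println(mapped.To4())       // 212.213.16.22 (so: udp4)
	fmt.Println(net.IPv6zero.To4()) // <nil>          (so: udp6)
}
```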

View File

@@ -28,8 +28,7 @@ import (
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -47,14 +46,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
}
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -99,14 +98,13 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
@@ -116,11 +114,10 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
@@ -135,13 +132,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -191,13 +188,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))
var addrs []ma.Multiaddr
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
if len(addrs) == 0 {

View File

@@ -20,7 +20,7 @@ self=%s
%d peers
%v
`,
s.cfg.Discv5BootStrapAddrs,
s.cfg.BootstrapNodeAddr,
s.selfAddresses(),
len(s.host.Network().Peers()),
formatPeers(s.host), // Must be last. Writes one entry per row.

View File

@@ -1,7 +1,6 @@
package p2p
import (
"net"
"strconv"
"strings"
@@ -13,32 +12,32 @@ import (
var log = logrus.WithField("prefix", "p2p")
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
var correctAddr ma.Multiaddr
for _, addr := range addrs {
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
continue
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
correctAddr = addr
break
}
}
if correctAddr != nil {
log.WithField(
"multiAddr",
addr.String()+"/p2p/"+id.String(),
correctAddr.String()+"/p2p/"+id.String(),
).Info("Node started p2p server")
}
}
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
func logExternalIPAddr(id peer.ID, addr string, port uint) {
if addr != "" {
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
multiAddr, err := MultiAddressBuilder(addr, port)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return
}
for _, multiAddr := range multiAddrs {
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
}

View File

@@ -4,129 +4,90 @@ import (
"crypto/ecdsa"
"fmt"
"net"
"time"
"github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
type internetProtocol string
const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)
// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
// Example: /ip4/1.2.3.4/tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
}
multiaddrs := []ma.Multiaddr{multiAddrTCP}
if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, tcpPort)
}
multiaddrs = append(multiaddrs, multiAddrQUIC)
}
return multiaddrs, nil
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
cfg := s.cfg
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
if cfg.LocalIP != "" {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
if net.ParseIP(cfg.LocalIP) == nil {
log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
}
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
}
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
if err != nil {
return nil, errors.Wrap(err, "cannot convert private key to interface private key. (Private key not displayed in logs for security reasons)")
log.WithError(err).Fatal("Failed to retrieve private key")
}
id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
if err != nil {
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
log.WithError(err).Fatal("Failed to retrieve peer id")
}
log.Infof("Running node with peer id of %s ", id.String())
options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(multiaddrs...),
libp2p.ListenAddrs(listen),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
}
options = append(options, libp2p.Security(noise.ID, noise.New))
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}
if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}
if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, externalMultiaddrs...)
addrs = append(addrs, external)
}
return addrs
}))
}
if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -138,51 +99,26 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
return addrs
}))
}
// Disable Ping Service.
options = append(options, libp2p.Ping(false))
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options, nil
return options
}
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
if ip.To16() != nil {
return "ip6", nil
if id.String() == "" {
return nil, errors.New("empty peer id given")
}
return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}
func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string
if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip4/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}
return ma.NewMultiaddr(multiaddrStr)
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
// Adds a private key to the libp2p option if the option was provided.
@@ -198,8 +134,3 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
return cfg.Apply(libp2p.Identity(ifaceKey))
}
}
// Configures stream timeouts on mplex.
func configureMplex() {
gomplex.ResetStreamTimeout = 5 * time.Second
}
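
For reference, these are the two listen-address shapes the QUIC-aware MultiAddressBuilder above produces per IP, spelled out with go-multiaddr (ports are arbitrary):

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	tcpAddr, err := ma.NewMultiaddr("/ip4/192.168.0.1/tcp/13000")
	if err != nil {
		panic(err)
	}
	quicAddr, err := ma.NewMultiaddr("/ip4/192.168.0.1/udp/13000/quic-v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(tcpAddr, quicAddr)
}
```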

View File

@@ -13,7 +13,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -89,34 +88,30 @@ func TestIPV6Support(t *testing.T) {
lNode := enode.NewLocalNode(db, key)
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
lNode.Set(enr.IP(mockIPV6))
mas, err := retrieveMultiAddrsFromNode(lNode.Node())
ma, err := convertToSingleMultiAddr(lNode.Node())
if err != nil {
t.Fatal(err)
}
for _, ma := range mas {
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if p.Name == "ip6" {
ipv6Exists = true
}
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
if p.Name == "ip6" {
ipv6Exists = true
}
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}
func TestDefaultMultiplexers(t *testing.T) {
var cfg libp2p.Config
_ = cfg
p2pCfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
}
svc := &Service{cfg: p2pCfg}
@@ -124,65 +119,11 @@ func TestDefaultMultiplexers(t *testing.T) {
svc.privKey, err = privKey(svc.cfg)
assert.NoError(t, err)
ipAddr := network.IPAddr()
opts, err := svc.buildOptions(ipAddr, svc.privKey)
assert.NoError(t, err)
opts := svc.buildOptions(ipAddr, svc.privKey)
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}
func TestMultiAddressBuilderWithID(t *testing.T) {
testCases := []struct {
name string
ip net.IP
protocol internetProtocol
port uint
id string
expectedMultiaddrStr string
}{
{
name: "UDP",
ip: net.IPv4(192, 168, 0, 1),
protocol: udp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "TCP",
ip: net.IPv4(192, 168, 0, 1),
protocol: tcp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "QUIC",
ip: net.IPv4(192, 168, 0, 1),
protocol: quic,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
id, err := hex.DecodeString(tt.id)
require.NoError(t, err)
actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
require.NoError(t, err)
actualMultiaddrStr := actualMultiaddr.String()
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
})
}
}

View File

@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
pms := pubsubGossipParam()
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
}
func TestFanoutParameters(t *testing.T) {

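The assertion change above is a units fix: gossipSubSeenTTL counts heartbeats, not seconds, so the test must divide pubsub.TimeCacheDuration by the heartbeat interval. If, purely for illustration, the heartbeat is 700ms and the seen TTL is 550 heartbeats, the message-ID cache works out to 385s:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	heartbeat := 700 * time.Millisecond // illustrative values only
	const seenTTLHeartbeats = 550
	timeCache := seenTTLHeartbeats * heartbeat
	fmt.Println(timeCache)                                           // 6m25s
	fmt.Println(timeCache.Milliseconds() / heartbeat.Milliseconds()) // 550
}
```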
View File

@@ -107,4 +107,5 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))
}

View File

@@ -56,12 +56,12 @@ func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConf
func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
@@ -87,11 +87,11 @@ func (s *BadResponsesScorer) Params() *BadResponsesScorerConfig {
func (s *BadResponsesScorer) Count(pid peer.ID) (int, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.countNoLock(pid)
return s.count(pid)
}
// countNoLock is a lock-free version of Count.
func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
// count is a lock-free version of Count.
func (s *BadResponsesScorer) count(pid peer.ID) (int, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.BadResponses, nil
}
@@ -119,11 +119,11 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.BadResponses >= s.config.Threshold
}
@@ -137,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
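
The renames in this scorer (and the block-provider, gossip, and peer-status scorers below) all encode the same locking convention: the exported method acquires the lock and delegates to an unexported *NoLock sibling that assumes the lock is held, so already-locked aggregate paths can reuse the logic without self-deadlocking. A minimal sketch of the convention:

```go
package scorers

import "sync"

type scorer struct {
	mu    sync.RWMutex
	count map[string]int
}

// Score is the public, locking entry point.
func (s *scorer) Score(id string) int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.scoreNoLock(id)
}

// scoreNoLock assumes s.mu is held; callers that already hold the lock
// (e.g. an aggregate ScoreNoLock) call it directly.
func (s *scorer) scoreNoLock(id string) int {
	return s.count[id]
}
```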

View File

@@ -15,8 +15,6 @@ import (
)
func TestScorers_BadResponses_Score(t *testing.T) {
const pid = "peer1"
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -30,23 +28,15 @@ func TestScorers_BadResponses_Score(t *testing.T) {
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, true, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score for unregistered peer")
scorer.Increment("peer1")
assert.Equal(t, -2.5, scorer.Score("peer1"))
scorer.Increment("peer1")
assert.Equal(t, float64(-5), scorer.Score("peer1"))
scorer.Increment("peer1")
scorer.Increment("peer1")
assert.Equal(t, -100.0, scorer.Score("peer1"))
assert.Equal(t, true, scorer.IsBadPeer("peer1"))
}
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {

View File

@@ -98,11 +98,11 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
func (s *BlockProviderScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *BlockProviderScorer) scoreNoLock(pid peer.ID) float64 {
// score is a lock-free version of Score.
func (s *BlockProviderScorer) score(pid peer.ID) float64 {
score := float64(0)
peerData, ok := s.store.PeerData(pid)
// Boost score of new peers or peers that haven't been accessed for too long.
@@ -126,7 +126,7 @@ func (s *BlockProviderScorer) Params() *BlockProviderScorerConfig {
func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64) {
s.store.Lock()
defer s.store.Unlock()
defer s.touchNoLock(pid)
defer s.touch(pid)
if cnt <= 0 {
return
@@ -145,11 +145,11 @@ func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64)
func (s *BlockProviderScorer) Touch(pid peer.ID, t ...time.Time) {
s.store.Lock()
defer s.store.Unlock()
s.touchNoLock(pid, t...)
s.touch(pid, t...)
}
// touchNoLock is a lock-free version of Touch.
func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
// touch is a lock-free version of Touch.
func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
peerData := s.store.PeerDataGetOrCreate(pid)
if len(t) == 1 {
peerData.BlockProviderUpdated = t[0]
@@ -162,11 +162,11 @@ func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
func (s *BlockProviderScorer) ProcessedBlocks(pid peer.ID) uint64 {
s.store.RLock()
defer s.store.RUnlock()
return s.processedBlocksNoLock(pid)
return s.processedBlocks(pid)
}
// processedBlocksNoLock is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// processedBlocks is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.ProcessedBlocks
}
@@ -177,13 +177,13 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
func (_ *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
}
// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
func (*BlockProviderScorer) BadPeers() []peer.ID {
func (_ *BlockProviderScorer) BadPeers() []peer.ID {
return []peer.ID{}
}
@@ -277,9 +277,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
peers := make([]peer.ID, len(pids))
for i, pid := range pids {
if scoreFn != nil {
scores[pid] = scoreFn(pid, s.scoreNoLock(pid))
scores[pid] = scoreFn(pid, s.score(pid))
} else {
scores[pid] = s.scoreNoLock(pid)
scores[pid] = s.score(pid)
}
peers[i] = pid
}
@@ -293,9 +293,9 @@ func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
if !features.Get().EnablePeerScorer {
return "disabled"
}
score := s.scoreNoLock(pid)
score := s.score(pid)
return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
(score/s.MaxScore())*100, score, s.processedBlocksNoLock(pid), s.config.ProcessedBlocksCap)
(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
}
// MaxScore exposes maximum score attainable by peers.

View File

@@ -38,11 +38,11 @@ func newGossipScorer(store *peerdata.Store, config *GossipScorerConfig) *GossipS
func (s *GossipScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
// score is a lock-free version of Score.
func (s *GossipScorer) score(pid peer.ID) float64 {
peerData, ok := s.store.PeerData(pid)
if !ok {
return 0
@@ -54,11 +54,11 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
@@ -73,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
@@ -98,11 +98,11 @@ func (s *GossipScorer) SetGossipData(pid peer.ID, gScore float64,
func (s *GossipScorer) GossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.gossipDataNoLock(pid)
return s.gossipData(pid)
}
// gossipDataNoLock is a lock-free version of GossipData.
func (s *GossipScorer) gossipDataNoLock(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
// gossipData is a lock-free version of GossipData.
func (s *GossipScorer) gossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.GossipScore, peerData.BehaviourPenalty, peerData.TopicScores, nil
}

View File

@@ -41,12 +41,12 @@ func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig)
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.scoreNoLock(pid)
return s.score(pid)
}
// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
@@ -70,11 +70,11 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
return s.isBadPeer(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
// isBadPeer is a lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
@@ -100,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
@@ -129,11 +129,11 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatusNoLock(pid)
return s.peerStatus(pid)
}
// peerStatusNoLock is a lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
// peerStatus is a lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.ChainState == nil {
return nil, peerdata.ErrNoPeerStatus

View File

@@ -116,10 +116,10 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
if _, ok := s.store.PeerData(pid); !ok {
return 0
}
score += s.scorers.badResponsesScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
score += s.scorers.gossipScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.gossipScorer)
score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
score += s.scorers.gossipScorer.score(pid) * s.scorerWeight(s.scorers.gossipScorer)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
@@ -132,14 +132,14 @@ func (s *Service) IsBadPeer(pid peer.ID) bool {
// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
if s.scorers.badResponsesScorer.isBadPeer(pid) {
return true
}
if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
if features.Get().EnablePeerScorer {
if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
if s.scorers.gossipScorer.isBadPeer(pid) {
return true
}
}
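
`ScoreNoLock` above composes the per-scorer `scoreNoLock` values into one number: each sub-score is multiplied by its scorer's weight, summed, and rounded to a fixed precision. A sketch of the rounding step; the factor of 10000 is an assumption for illustration, not necessarily the value of Prysm's `ScoreRoundingFactor`:

```go
package main

import (
	"fmt"
	"math"
)

// scoreRoundingFactor of 1e4 keeps four decimal places (an assumed value).
const scoreRoundingFactor = 10000

// roundScore clamps the aggregate score to a stable precision so that tiny
// floating-point drift between sub-scorers cannot reorder peers.
func roundScore(score float64) float64 {
	return math.Round(score*scoreRoundingFactor) / scoreRoundingFactor
}

func main() {
	// Hypothetical weighted sum of sub-scores, mirroring ScoreNoLock.
	score := -1.2*0.3 + 2.0*0.0 + 0.5*0.3 + 0.04*0.4
	fmt.Println(roundScore(score)) // -0.194
}
```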

View File

@@ -1,7 +1,7 @@
// Package peers provides information about peers at the Ethereum consensus protocol level.
//
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
// with peers that are contactable but may or may not be of the correct fork version, not currently
// required due to the number of current connections, etc.
//
@@ -26,7 +26,6 @@ import (
"context"
"math"
"sort"
"strings"
"time"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -60,8 +59,8 @@ const (
)
const (
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
ColocationLimit = 5
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -77,13 +76,6 @@ const (
MaxBackOffDuration = 5000
)
type InternetProtocol string
const (
TCP = "tcp"
QUIC = "quic"
)
// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
@@ -457,19 +449,6 @@ func (p *Status) InboundConnected() []peer.ID {
return peers
}
// InboundConnectedWithProtocol returns the current batch of inbound peers that are connected with a given protocol.
func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
// Outbound returns the current batch of outbound peers.
func (p *Status) Outbound() []peer.ID {
p.store.RLock()
@@ -496,20 +475,7 @@ func (p *Status) OutboundConnected() []peer.ID {
return peers
}
// OutboundConnectedWithProtocol returns the current batch of outbound peers that are connected with a given protocol.
func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
// Active returns the peers that are active (connecting or connected).
// Active returns the peers that are connecting or connected.
func (p *Status) Active() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -548,7 +514,7 @@ func (p *Status) Disconnected() []peer.ID {
return peers
}
// Inactive returns the peers that are inactive (disconnecting or disconnected).
// Inactive returns the peers that are disconnecting or disconnected.
func (p *Status) Inactive() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -582,7 +548,7 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
// Default to old method if flag isn't enabled.
// Default to old method if flag isnt enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
@@ -995,7 +961,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
if val > ColocationLimit {
return true
}
}
@@ -1046,7 +1012,7 @@ func (p *Status) tallyIPTracker() {
}
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
// Exit early if we do get nil multi-addresses
// Exit early if we do get nil multiaddresses
if firstAddr == nil || secondAddr == nil {
return false
}
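
`isfromBadIP` above treats a peer as bad once more than `CollocationLimit` identities have been tallied for its IP. A stripped-down tally of the same idea, assuming a plain map from IP string to count (the real tracker also buckets IPv6 subnets and sits behind the status lock):

```go
package main

import (
	"fmt"
	"net"
)

// collocationLimit mirrors the constant above: at most 5 peer identities
// per IP before further peers from that address are considered bad.
const collocationLimit = 5

type ipTracker map[string]int

// add records one peer identity observed at the given address.
func (t ipTracker) add(ip net.IP) {
	t[ip.String()]++
}

// isBad reports whether the address has exceeded the colocation limit.
func (t ipTracker) isBad(ip net.IP) bool {
	return t[ip.String()] > collocationLimit
}

func main() {
	tracker := ipTracker{}
	ip := net.ParseIP("211.227.218.116")
	for i := 0; i < collocationLimit+1; i++ {
		tracker.add(ip)
	}
	fmt.Println(tracker.isBad(ip)) // true: six identities from one IP
}
```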

View File

@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
for i := 0; i < peers.ColocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
@@ -1111,87 +1111,6 @@ func TestInbound(t *testing.T) {
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
result := p.InboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.InboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.InboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
func TestOutbound(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -1211,87 +1130,6 @@ func TestOutbound(t *testing.T) {
assert.Equal(t, outbound.String(), result[0].String())
}
func TestOutboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
result := p.OutboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestOutboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.OutboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.OutboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states
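
The deleted `TestInboundConnectedWithProtocol`/`TestOutboundConnectedWithProtocol` cases classify peers by substring-matching the multiaddr ("tcp" vs. "quic"). A sketch of the same classification done against the multiaddr's parsed protocol components instead of raw strings; this is an alternative formulation for illustration, not the code above:

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

// hasProtocol reports whether any component of the multiaddr uses the named
// transport, e.g. "tcp" or "quic-v1".
func hasProtocol(addr ma.Multiaddr, name string) bool {
	for _, p := range addr.Protocols() {
		if p.Name == name {
			return true
		}
	}
	return false
}

func main() {
	tcpAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	quicAddr, _ := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")

	fmt.Println(hasProtocol(tcpAddr, "tcp"))      // true
	fmt.Println(hasProtocol(quicAddr, "quic-v1")) // true
	fmt.Println(hasProtocol(tcpAddr, "quic-v1"))  // false
}
```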

View File

@@ -3,7 +3,6 @@ package p2p
import (
"context"
"encoding/hex"
"fmt"
"strings"
"time"
@@ -26,7 +25,7 @@ const (
// gossip parameters
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
gossipSubMcacheGossip = 3 // number of windows to gossip about
gossipSubSeenTTL = 768 // number of seconds to retain message IDs (2 epochs)
gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs
// fanout ttl
gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nanoseconds
@@ -131,7 +130,7 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
}
}
// pubsubOptions creates a list of options to configure our router with.
// Creates a list of pubsub options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
@@ -148,35 +147,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
}
if len(s.cfg.StaticPeers) > 0 {
directPeersAddrInfos, err := parsePeersEnr(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not add direct peer option")
return psOpts
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}
return psOpts
}
// parsePeersEnr takes a list of raw ENRs and converts them into a list of AddrInfos.
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
}
return directAddrInfos, nil
}
// creates a custom gossipsub parameter set.
func pubsubGossipParam() pubsub.GossipSubParams {
gParams := pubsub.DefaultGossipSubParams()
@@ -192,8 +165,7 @@ func pubsubGossipParam() pubsub.GossipSubParams {
// to configure our message id time-cache rather than instantiating
// it with a router instance.
func setPubSubParameters() {
seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
pubsub.TimeCacheDuration = seenTtl
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
}
// convert from libp2p's internal schema to a compatible prysm protobuf format.
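
The hunk above moves the message-ID cache TTL from a heartbeat count (550 heartbeats) to a duration derived from slot timing: two epochs' worth of seconds, which with mainnet parameters is 2 × 32 × 12 = 768 seconds and matches the updated `gossipSubSeenTTL` comment. A sketch of the computation with those parameters hard-coded as assumptions (Prysm reads them from `params.BeaconConfig()`):

```go
package main

import (
	"fmt"
	"time"
)

// Mainnet timing values, hard-coded here as assumptions.
const (
	slotsPerEpoch  = 32
	secondsPerSlot = 12
)

// seenTTL returns how long gossipsub should remember message IDs: two epochs.
func seenTTL() time.Duration {
	return 2 * time.Second * time.Duration(slotsPerEpoch*secondsPerSlot)
}

func main() {
	fmt.Println(seenTTL()) // 12m48s, i.e. 768 seconds
}
```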

View File

@@ -24,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
@@ -83,76 +82,67 @@ type Service struct {
// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
var err error
ctx, cancel := context.WithCancel(ctx)
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
cfg = validateConfig(cfg)
privKey, err := privKey(cfg)
if err != nil {
return nil, errors.Wrapf(err, "failed to generate p2p private key")
}
metaData, err := metaDataFromConfig(cfg)
if err != nil {
log.WithError(err).Error("Failed to create peer metadata")
return nil, err
}
addrFilter, err := configureFilter(cfg)
if err != nil {
log.WithError(err).Error("Failed to create address filter")
return nil, err
}
ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
s := &Service{
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
}
s.cfg = validateConfig(s.cfg)
dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr)
cfg.Discv5BootStrapAddr = dv5Nodes
ipAddr := prysmnetwork.IPAddr()
opts, err := s.buildOptions(ipAddr, s.privKey)
s.privKey, err = privKey(s.cfg)
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
log.WithError(err).Error("Failed to generate p2p private key")
return nil, err
}
s.metaData, err = metaDataFromConfig(s.cfg)
if err != nil {
log.WithError(err).Error("Failed to create peer metadata")
return nil, err
}
s.addrFilter, err = configureFilter(s.cfg)
if err != nil {
log.WithError(err).Error("Failed to create address filter")
return nil, err
}
s.ipLimiter = leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
// Sets mplex timeouts
configureMplex()
opts := s.buildOptions(ipAddr, s.privKey)
h, err := libp2p.New(opts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to create p2p host")
log.WithError(err).Error("Failed to create p2p host")
return nil, err
}
s.host = h
// Gossipsub registration is done before we add in any new peers
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub
// object.
psOpts := s.pubsubOptions()
// Set the pubsub global parameters that we require.
setPubSubParameters()
// Reinitialize them in the event we are running a custom config.
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
log.WithError(err).Error("Failed to start pubsub")
return nil, err
}
s.pubsub = gs
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
@@ -217,7 +207,7 @@ func (s *Service) Start() {
if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("could not convert ENR to multiaddr")
log.WithError(err).Error("Could not connect to static peer")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
@@ -236,24 +226,11 @@ func (s *Service) Start() {
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, 1*time.Minute, func() {
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
outboundQUICCount := len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))
outboundTCPCount := len(s.peers.OutboundConnectedWithProtocol(peers.TCP))
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount
fields := logrus.Fields{
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
}
if features.Get().EnableQUIC {
fields["inboundQUIC"] = inboundQUICCount
fields["outboundQUIC"] = outboundQUICCount
}
log.WithFields(fields).Info("Connected peers")
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),
"outbound": len(s.peers.OutboundConnected()),
"activePeers": len(s.peers.Active()),
}).Info("Peer summary")
})
multiAddrs := s.host.Network().ListenAddresses()
@@ -261,10 +238,9 @@ func (s *Service) Start() {
p2pHostAddress := s.cfg.HostAddress
p2pTCPPort := s.cfg.TCPPort
p2pQUICPort := s.cfg.QUICPort
if p2pHostAddress != "" {
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
}
@@ -309,7 +285,7 @@ func (s *Service) Started() bool {
}
// Encoding returns the configured networking encoding.
func (*Service) Encoding() encoder.NetworkEncoding {
func (_ *Service) Encoding() encoder.NetworkEncoding {
return &encoder.SszNetworkEncoder{}
}
@@ -475,8 +451,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
}
func (s *Service) connectToBootnodes() error {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
for _, addr := range s.cfg.Discv5BootStrapAddrs {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddr))
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return err

View File

@@ -102,9 +102,8 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
@@ -148,9 +147,8 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs,
@@ -215,9 +213,10 @@ func TestListenForNewNodes(t *testing.T) {
// setup other nodes.
cs := startup.NewClockSynchronizer()
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
ClockWaiter: cs,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
ClockWaiter: cs,
}
for i := 1; i <= 5; i++ {
h, pkey, ipAddr := createHost(t, port+i)

View File

@@ -46,13 +46,9 @@ const syncLockerVal = 100
const blobSubnetLockerVal = 110
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
// - the required number of peers is found, or
// - the context is terminated.
// In some edge cases, this method may hang indefinitely even though
// peers are actually found. In such cases, the user should cancel the
// context and re-run the method.
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required amount of
// peers are found, the method only exits in the event of context timeouts.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
@@ -77,9 +73,9 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
return false, errors.New("no subnet exists for provided topic")
}
currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
currNum := len(s.pubsub.ListPeers(topic))
if currNum >= threshold {
break
}
@@ -93,11 +89,6 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
if err != nil {
continue
}
if info == nil {
continue
}
wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
@@ -108,6 +99,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
}
// Wait for all dials to be completed.
wg.Wait()
currNum = len(s.pubsub.ListPeers(topic))
}
return true, nil
}
@@ -118,13 +110,18 @@ func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bo
if !s.filterPeer(node) {
return false
}
subnets, err := attSubnets(node.Record())
if err != nil {
return false
}
return subnets[index]
indExists := false
for _, comIdx := range subnets {
if comIdx == index {
indExists = true
break
}
}
return indExists
}
}
@@ -208,10 +205,8 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
//
// return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
subnetsPerNode := params.BeaconConfig().SubnetsPerNode
subs := make([]uint64, 0, subnetsPerNode)
for i := uint64(0); i < subnetsPerNode; i++ {
subs := []uint64{}
for i := uint64(0); i < params.BeaconConfig().SubnetsPerNode; i++ {
sub, err := computeSubscribedSubnet(nodeID, epoch, i)
if err != nil {
return nil, err
@@ -286,20 +281,19 @@ func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {
// Reads the attestation subnets entry from a node's ENR and determines
// the committee indices of the attestation subnets the node is subscribed to.
func attSubnets(record *enr.Record) (map[uint64]bool, error) {
func attSubnets(record *enr.Record) ([]uint64, error) {
bitV, err := attBitvector(record)
if err != nil {
return nil, err
}
committeeIdxs := make(map[uint64]bool)
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(attestationSubnetCount)) {
return committeeIdxs, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}
var committeeIdxs []uint64
for i := uint64(0); i < attestationSubnetCount; i++ {
if bitV.BitAt(i) {
committeeIdxs[i] = true
committeeIdxs = append(committeeIdxs, i)
}
}
return committeeIdxs, nil
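
The `attSubnets` change above returns a set (`map[uint64]bool`) rather than a slice, so `filterPeerForAttSubnet` can test membership with one map lookup instead of scanning a list. A sketch of the bitvector-to-set decoding, assuming 64 attestation subnets:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

const attestationSubnetCount = 64 // assumption: mainnet subnet count

// subnetsFromBitvector decodes an attnets bitvector into a membership set,
// mirroring the map-returning attSubnets in the diff above.
func subnetsFromBitvector(bitV bitfield.Bitvector64) map[uint64]bool {
	subnets := make(map[uint64]bool)
	for i := uint64(0); i < attestationSubnetCount; i++ {
		if bitV.BitAt(i) {
			subnets[i] = true
		}
	}
	return subnets
}

func main() {
	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(1, true)
	bitV.SetBitAt(40, true)

	subnets := subnetsFromBitvector(bitV)
	fmt.Println(subnets[1], subnets[40], subnets[2]) // true true false
}
```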

View File

@@ -3,46 +3,49 @@ package p2p
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
// Topology of this test:
//
//
// Node 1 (subscribed to subnet 1) --\
// |
// Node 2 (subscribed to subnet 2) --+--> BootNode (not subscribed to any subnet) <------- Node 0 (not subscribed to any subnet)
// |
// Node 3 (subscribed to subnet 3) --/
//
// The purpose of this test is to ensure that the "Node 0" (connected only to the boot node) is able to
// find and connect to a node already subscribed to a specific subnet.
// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3
// Define the genesis validators root, to ensure everybody is on the same network.
const genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
t.Skip("This test is now failing after PR 7885 due to false positive")
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 4
flags.Init(gFlags)
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port)},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Create a context.
ctx := context.Background()
bootNode := bootListener.Self()
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
@@ -50,152 +53,113 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
pollingPeriod = currentPeriod
}()
// Create flags.
params.SetupTestConfigCleanup(t)
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
params.BeaconNetworkConfig().MinimumPeersInSubnetSearch = 1
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
// First, generate a bootstrap node.
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootNodeForkDigest, err := bootNodeService.currentForkDigest()
require.NoError(t, err)
bootListener, err := bootNodeService.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
bootNodeENR := bootListener.Self().String()
// Create 3 nodes, each subscribed to a different subnet.
// Each node is connected to the bootstrap node.
services := make([]*Service, 0, 3)
var listeners []*discover.UDPv5
for i := 1; i <= 3; i++ {
subnet := uint64(i)
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: uint(2000 + i),
TCPPort: uint(3000 + i),
QUICPort: uint(3000 + i),
})
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err)
require.Equal(t, true, nodeForkDigest == bootNodeForkDigest, "fork digest of the node doesn't match the boot node")
// Start the service.
service.Start()
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
port = 3000 + i
cfg := &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(subnet, true)
bitV.SetBitAt(uint64(i), true)
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
service.dv5Listener.LocalNode().Set(entry)
// Join and subscribe to the subnet, needed by libp2p.
topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
require.NoError(t, err)
_, err = topic.Subscribe()
require.NoError(t, err)
// Memoize the service.
services = append(services, service)
listener.LocalNode().Set(entry)
listeners = append(listeners, listener)
}
// Stop the services.
defer func() {
for _, service := range services {
err := service.Stop()
require.NoError(t, err)
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Make one service on port 4001.
port = 4001
gs := startup.NewClockSynchronizer()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: 2010,
TCPPort: 3010,
QUICPort: 3010,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
UDPPort: uint(port),
ClockWaiter: gs,
}
service, err := NewService(ctx, cfg)
s, err = NewService(context.Background(), cfg)
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.Start()
defer func() {
err := service.Stop()
require.NoError(t, err)
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
}()
time.Sleep(50 * time.Millisecond)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
// Look up 3 different subnets.
exists := make([]bool, 0, 3)
for i := 1; i <= 3; i++ {
subnet := uint64(i)
topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
exist := false
// This for loop is used to ensure we don't get stuck in `FindPeersWithSubnet`.
// Read the documentation of `FindPeersWithSubnet` for more details.
for j := 0; j < 3; j++ {
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
exist, err = service.FindPeersWithSubnet(ctxWithTimeOut, topic, subnet, 1)
require.NoError(t, err)
if exist {
break
}
}
require.NoError(t, err)
exists = append(exists, exist)
// Wait for the nodes' local routing tables to be populated with the other nodes.
time.Sleep(6 * discoveryWaitTime)
// look up 3 different subnets
ctx := context.Background()
exists, err := s.FindPeersWithSubnet(ctx, "", 1, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists2, err := s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists3, err := s.FindPeersWithSubnet(ctx, "", 3, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
if !exists || !exists2 || !exists3 {
t.Fatal("Peer with subnet doesn't exist")
}
// Check if all peers are found.
for _, exist := range exists {
require.Equal(t, true, exist, "Peer with subnet doesn't exist")
// Update ENR of a peer.
testService := &Service{
dv5Listener: listeners[0],
metaData: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
Attnets: bitfield.NewBitvector64(),
}),
}
cache.SubnetIDs.AddAttesterSubnetID(0, 10)
testService.RefreshENR()
time.Sleep(2 * time.Second)
exists, err = s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
assert.Equal(t, true, exists, "Peer with subnet doesn't exist")
assert.NoError(t, s.Stop())
exitRoutine <- true
}
func Test_AttSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(localNode *enode.LocalNode) *enr.Record
record func(t *testing.T) *enr.Record
want []uint64
wantErr bool
errContains string
}{
{
name: "valid record",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
localNode = initializeAttSubnets(localNode)
return localNode.Node().Record()
},
@@ -204,7 +168,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too small subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
localNode.Set(entry)
return localNode.Node().Record()
@@ -215,7 +186,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "half sized subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
localNode.Set(entry)
return localNode.Node().Record()
@@ -226,7 +204,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too large subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+1))
localNode.Set(entry)
return localNode.Node().Record()
@@ -237,7 +222,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "very large subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+100))
localNode.Set(entry)
return localNode.Node().Record()
@@ -248,7 +240,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "single subnet",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(0, true)
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
@@ -260,10 +259,17 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "multiple subnets",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
// Keep only odd subnets.
// skip 2 subnets
if (i+1)%2 == 0 {
continue
}
@@ -281,7 +287,14 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "all subnets",
record: func(localNode *enode.LocalNode) *enr.Record {
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
bitV.SetBitAt(i, true)
@@ -298,35 +311,16 @@ func Test_AttSubnets(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record := tt.record(localNode)
got, err := attSubnets(record)
got, err := attSubnets(tt.record(t))
if (err != nil) != tt.wantErr {
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
assert.ErrorContains(t, tt.errContains, err)
}
want := make(map[uint64]bool, len(tt.want))
for _, subnet := range tt.want {
want[subnet] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("syncSubnets() got = %v, want %v", got, want)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)
}
})
}

Some files were not shown because too many files have changed in this diff.