Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: forkchoice...v5.0.2 (46 commits)
| SHA1 |
|---|
| b3053dc96a |
| 3d2230223f |
| b008a6422d |
| d19365507f |
| c05e39a668 |
| 63c2b3563a |
| a6e86c6731 |
| 32fb183392 |
| cade09ba0b |
| f85ddfe265 |
| 3b97094ea4 |
| acdbf7c491 |
| 1cc1effd75 |
| f7f1d249f2 |
| 02abb3e3c0 |
| 2255c8b287 |
| 27ecf448a7 |
| e243f04e44 |
| fca1adbad7 |
| b692722ddf |
| c4f6020677 |
| d779e65d4e |
| 357211b7d9 |
| 2dd48343a2 |
| 7f931bf65b |
| fda4589251 |
| 34593d34d4 |
| 4d18e590ed |
| ec8b67cb12 |
| a817aa0a8d |
| d76f55e97a |
| 2de21eb22f |
| 58b8c31c93 |
| f343333880 |
| 8e0b1b7e1f |
| 65f71b3a48 |
| 9fcb9b86af |
| aa63c4e7f2 |
| d6ae838bbf |
| d49afb370c |
| 4d3a6d84d2 |
| 9c5d16e161 |
| 4731304187 |
| 02cbcf8545 |
| 4e10734ae4 |
| e19c99c3e2 |
.bazelrc (6 lines changed)

```diff
@@ -6,6 +6,12 @@ import %workspace%/build/bazelrc/debug.bazelrc
 import %workspace%/build/bazelrc/hermetic-cc.bazelrc
 import %workspace%/build/bazelrc/performance.bazelrc
 
+# hermetic_cc_toolchain v3.0.1 required changes.
+common --enable_platform_specific_config
+build:linux --sandbox_add_mount_pair=/tmp
+build:macos --sandbox_add_mount_pair=/var/tmp
+build:windows --sandbox_add_mount_pair=C:\Temp
+
 # E2E run with debug gotag
 test:e2e --define gotags=debug
```
```diff
@@ -1 +1 @@
-7.0.0
+7.1.0
```
MODULE.bazel.lock (generated, 821 lines changed)

File diff suppressed because it is too large.
```diff
@@ -2,7 +2,7 @@
 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
-[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
+[](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
 [](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
 [](https://discord.gg/prysmaticlabs)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
```
WORKSPACE (48 lines changed)

```diff
@@ -16,12 +16,14 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
 
 rules_pkg_dependencies()
 
+HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
+
 http_archive(
     name = "hermetic_cc_toolchain",
-    sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
-    strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
+    sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
     urls = [
-        "https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz",  # 2023-06-28
+        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
+        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
     ],
 )
@@ -81,10 +83,10 @@ bazel_skylib_workspace()
 
 http_archive(
     name = "bazel_gazelle",
-    sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
+    integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
+        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
     ],
 )
@@ -128,9 +130,9 @@ aspect_bazel_lib_register_toolchains()
 
 http_archive(
     name = "rules_oci",
-    sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
-    strip_prefix = "rules_oci-1.3.4",
-    url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
+    sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
+    strip_prefix = "rules_oci-1.7.4",
+    url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
 )
 
 load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -151,17 +153,13 @@ http_archive(
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
+    sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
     ],
 )
 
-load("//:distroless_deps.bzl", "distroless_deps")
-
-distroless_deps()
-
 # Override default import in rules_go with special patch until
 # https://github.com/gogo/protobuf/pull/582 is merged.
 git_repository(
@@ -200,10 +198,14 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.21.6",
+    go_version = "1.21.8",
     nogo = "@//:nogo",
 )
 
+load("//:distroless_deps.bzl", "distroless_deps")
+
+distroless_deps()
+
 http_archive(
     name = "io_kubernetes_build",
     sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -241,9 +243,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.4.0-beta.7"
-
-consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
+consensus_spec_version = "v1.4.0"
 
 bls_test_version = "v0.1.1"
@@ -260,7 +260,7 @@ filegroup(
 )
 """,
     sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
+    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
 http_archive(
@@ -276,7 +276,7 @@ filegroup(
 )
 """,
     sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
+    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
 http_archive(
@@ -292,7 +292,7 @@ filegroup(
 )
 """,
     sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
+    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
 http_archive(
@@ -306,7 +306,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
+    sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
```
```diff
@@ -6,12 +6,14 @@ go_library(
         "checkpoint.go",
         "client.go",
         "doc.go",
+        "health.go",
         "log.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
     visibility = ["//visibility:public"],
     deps = [
         "//api/client:go_default_library",
+        "//api/client/beacon/iface:go_default_library",
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
@@ -37,10 +39,12 @@ go_test(
     srcs = [
         "checkpoint_test.go",
         "client_test.go",
+        "health_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
         "//api/client:go_default_library",
+        "//api/client/beacon/testing:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
@@ -54,5 +58,6 @@ go_test(
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@org_uber_go_mock//gomock:go_default_library",
     ],
 )
```
api/client/beacon/health.go (new file, 55 lines)

```go
package beacon

import (
	"context"
	"sync"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
)

type NodeHealthTracker struct {
	isHealthy  *bool
	healthChan chan bool
	node       iface.HealthNode
	sync.RWMutex
}

func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
	return &NodeHealthTracker{
		node:       node,
		healthChan: make(chan bool, 1),
	}
}

// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
	return n.healthChan
}

func (n *NodeHealthTracker) IsHealthy() bool {
	n.RLock()
	defer n.RUnlock()
	if n.isHealthy == nil {
		return false
	}
	return *n.isHealthy
}

func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
	n.RLock()
	newStatus := n.node.IsHealthy(ctx)
	if n.isHealthy == nil {
		n.isHealthy = &newStatus
	}
	isStatusChanged := newStatus != *n.isHealthy
	n.RUnlock()

	if isStatusChanged {
		n.Lock()
		// Double-check the condition to ensure it hasn't changed since the first check.
		n.isHealthy = &newStatus
		n.Unlock() // It's better to unlock as soon as the protected section is over.
		n.healthChan <- newStatus
	}
	return newStatus
}
```
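As a usage sketch (not part of the diff): the tracker pairs a polling loop with the change-notification channel. The `httpNode` type and the `/eth/v1/node/health` endpoint below are illustrative assumptions; only `NewNodeHealthTracker`, `CheckHealth`, `IsHealthy`, and `HealthUpdates` come from the file above.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

// httpNode is a hypothetical HealthNode that reports healthy when the
// node's health endpoint returns HTTP 200.
type httpNode struct{ host string }

func (h *httpNode) IsHealthy(ctx context.Context) bool {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, h.host+"/eth/v1/node/health", nil)
	if err != nil {
		return false
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func main() {
	tracker := beacon.NewNodeHealthTracker(&httpNode{host: "http://localhost:3500"})

	// React to transitions; CheckHealth only publishes when the status flips.
	go func() {
		for healthy := range tracker.HealthUpdates() {
			fmt.Println("health changed:", healthy)
		}
	}()

	// Poll once per slot-ish interval.
	for range time.Tick(12 * time.Second) {
		tracker.CheckHealth(context.Background())
	}
}
```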
api/client/beacon/health_test.go (new file, 118 lines)

```go
package beacon

import (
	"context"
	"sync"
	"testing"

	healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
	"go.uber.org/mock/gomock"
)

func TestNodeHealth_IsHealthy(t *testing.T) {
	tests := []struct {
		name      string
		isHealthy bool
		want      bool
	}{
		{"initially healthy", true, true},
		{"initially unhealthy", false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := &NodeHealthTracker{
				isHealthy:  &tt.isHealthy,
				healthChan: make(chan bool, 1),
			}
			if got := n.IsHealthy(); got != tt.want {
				t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
	tests := []struct {
		name       string
		initial    bool // Initial health status
		newStatus  bool // Status to update to
		shouldSend bool // Should a message be sent through the channel
	}{
		{"healthy to unhealthy", true, false, true},
		{"unhealthy to healthy", false, true, true},
		{"remain healthy", true, true, false},
		{"remain unhealthy", false, false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			client := healthTesting.NewMockHealthClient(ctrl)
			client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
			n := &NodeHealthTracker{
				isHealthy:  &tt.initial,
				node:       client,
				healthChan: make(chan bool, 1),
			}

			s := n.CheckHealth(context.Background())
			// Check if health status was updated
			if s != tt.newStatus {
				t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
			}

			select {
			case status := <-n.HealthUpdates():
				if !tt.shouldSend {
					t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
				} else if status != tt.newStatus {
					t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
				}
			default:
				if tt.shouldSend {
					t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
				}
			}
		})
	}
}

func TestNodeHealth_Concurrency(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := healthTesting.NewMockHealthClient(ctrl)
	n := NewNodeHealthTracker(client)
	var wg sync.WaitGroup

	// Number of goroutines to spawn for both reading and writing
	numGoroutines := 6

	go func() {
		for range n.HealthUpdates() {
			// Consume values to avoid blocking on channel send.
		}
	}()

	wg.Add(numGoroutines * 2) // for readers and writers

	// Concurrently update health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			client.EXPECT().IsHealthy(gomock.Any()).Return(false)
			n.CheckHealth(context.Background())
			client.EXPECT().IsHealthy(gomock.Any()).Return(true)
			n.CheckHealth(context.Background())
		}()
	}

	// Concurrently read health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			_ = n.IsHealthy() // Just read the value
		}()
	}

	wg.Wait() // Wait for all goroutines to finish
}
```
api/client/beacon/iface/BUILD.bazel (new file, 8 lines)

```
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["health.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
    visibility = ["//visibility:public"],
)
```
api/client/beacon/iface/health.go (new file, 13 lines)

```go
package iface

import "context"

type HealthTracker interface {
	HealthUpdates() <-chan bool
	IsHealthy() bool
	CheckHealth(ctx context.Context) bool
}

type HealthNode interface {
	IsHealthy(ctx context.Context) bool
}
```
api/client/beacon/testing/BUILD.bazel (new file, 12 lines)

```
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["mock.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
    visibility = ["//visibility:public"],
    deps = [
        "//api/client/beacon/iface:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
)
```
api/client/beacon/testing/mock.go (new file, 53 lines)

```go
package testing

import (
	"context"
	"reflect"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
	"go.uber.org/mock/gomock"
)

var (
	_ = iface.HealthNode(&MockHealthClient{})
)

// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
	ctrl     *gomock.Controller
	recorder *MockHealthClientMockRecorder
}

// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
	mock *MockHealthClient
}

// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsHealthy", arg0)
	ret0, ok := ret[0].(bool)
	if !ok {
		return false
	}
	return ret0
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
	return m.recorder
}

// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}

// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
	mock := &MockHealthClient{ctrl: ctrl}
	mock.recorder = &MockHealthClientMockRecorder{mock}
	return mock
}
```
```diff
@@ -304,6 +304,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 	}
 	versionOpt := func(r *http.Request) {
 		r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
+		r.Header.Set("Content-Type", "application/json")
+		r.Header.Set("Accept", "application/json")
 	}
 	rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -341,6 +343,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 	}
 	versionOpt := func(r *http.Request) {
 		r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
+		r.Header.Set("Content-Type", "application/json")
+		r.Header.Set("Accept", "application/json")
 	}
 	rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -379,6 +383,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 
 	versionOpt := func(r *http.Request) {
 		r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
+		r.Header.Set("Content-Type", "application/json")
+		r.Header.Set("Accept", "application/json")
 	}
 	rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
 	if err != nil {
```
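These `versionOpt` closures follow the functional request-option pattern: each option mutates the `*http.Request` before it is sent. A self-contained sketch of the pattern follows; the `doRequest` helper is an illustrative stand-in, since the client's real `do` method is not shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// reqOption mutates a request before it is sent.
type reqOption func(*http.Request)

// doRequest builds a request and applies each option in order.
func doRequest(ctx context.Context, method, url string, opts ...reqOption) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	for _, opt := range opts {
		opt(req)
	}
	return http.DefaultClient.Do(req)
}

func main() {
	// Mirrors the headers the diff adds to SubmitBlindedBlock requests.
	jsonOpt := func(r *http.Request) {
		r.Header.Set("Content-Type", "application/json")
		r.Header.Set("Accept", "application/json")
	}
	resp, err := doRequest(context.Background(), http.MethodGet, "http://localhost:3500/eth/v1/node/version", jsonOpt)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```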
```diff
@@ -321,6 +321,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
 				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
 				require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
+				require.Equal(t, "application/json", r.Header.Get("Content-Type"))
+				require.Equal(t, "application/json", r.Header.Get("Accept"))
 				return &http.Response{
 					StatusCode: http.StatusOK,
 					Body:       io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -347,6 +349,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
 				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
 				require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
+				require.Equal(t, "application/json", r.Header.Get("Content-Type"))
+				require.Equal(t, "application/json", r.Header.Get("Accept"))
 				return &http.Response{
 					StatusCode: http.StatusOK,
 					Body:       io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -376,6 +380,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
 				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
 				require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
+				require.Equal(t, "application/json", r.Header.Get("Content-Type"))
+				require.Equal(t, "application/json", r.Header.Get("Accept"))
 				var req structs.SignedBlindedBeaconBlockDeneb
 				err := json.NewDecoder(r.Body).Decode(&req)
 				require.NoError(t, err)
```
```diff
@@ -21,6 +21,9 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
 // ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
 var ErrInvalidNodeVersion = errors.New("invalid node version response")
 
+// ErrConnectionIssue represents a connection problem.
+var ErrConnectionIssue = errors.New("could not connect")
+
 // Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
 func Non200Err(response *http.Response) error {
 	bodyBytes, err := io.ReadAll(response.Body)
```
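Since `ErrConnectionIssue` is a package-level sentinel and `github.com/pkg/errors` preserves the cause when wrapping, callers can add context and still detect the sentinel later with the standard library's `errors.Is`. A minimal sketch, where the `dial` helper is illustrative:

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func dial() error {
	// Wrap the sentinel with context; the cause is preserved.
	return errors.Wrap(client.ErrConnectionIssue, "dialing beacon node")
}

func main() {
	if err := dial(); stderrors.Is(err, client.ErrConnectionIssue) {
		fmt.Println("connection problem:", err)
	}
}
```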
api/client/event/BUILD.bazel (new file, 24 lines)

```
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["event_stream.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/client:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["event_stream_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
```
api/client/event/event_stream.go (new file, 148 lines)

```go
package event

import (
	"bufio"
	"context"
	"net/http"
	"net/url"
	"strings"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/prysmaticlabs/prysm/v5/api/client"
	log "github.com/sirupsen/logrus"
)

const (
	EventHead                        = "head"
	EventBlock                       = "block"
	EventAttestation                 = "attestation"
	EventVoluntaryExit               = "voluntary_exit"
	EventBlsToExecutionChange        = "bls_to_execution_change"
	EventProposerSlashing            = "proposer_slashing"
	EventAttesterSlashing            = "attester_slashing"
	EventFinalizedCheckpoint         = "finalized_checkpoint"
	EventChainReorg                  = "chain_reorg"
	EventContributionAndProof        = "contribution_and_proof"
	EventLightClientFinalityUpdate   = "light_client_finality_update"
	EventLightClientOptimisticUpdate = "light_client_optimistic_update"
	EventPayloadAttributes           = "payload_attributes"
	EventBlobSidecar                 = "blob_sidecar"
	EventError                       = "error"
	EventConnectionError             = "connection_error"
)

var (
	_ = EventStreamClient(&EventStream{})
)

var DefaultEventTopics = []string{EventHead}

type EventStreamClient interface {
	Subscribe(eventsChannel chan<- *Event)
}

type Event struct {
	EventType string
	Data      []byte
}

// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
	ctx        context.Context
	httpClient *http.Client
	host       string
	topics     []string
}

func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
	// Check if the host is a valid URL
	_, err := url.ParseRequestURI(host)
	if err != nil {
		return nil, err
	}
	if len(topics) == 0 {
		return nil, errors.New("no topics provided")
	}

	return &EventStream{
		ctx:        ctx,
		httpClient: httpClient,
		host:       host,
		topics:     topics,
	}, nil
}

func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
	allTopics := strings.Join(h.topics, ",")
	log.WithField("topics", allTopics).Info("Listening to Beacon API events")
	fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
	req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
	if err != nil {
		eventsChannel <- &Event{
			EventType: EventConnectionError,
			Data:      []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
		}
	}
	req.Header.Set("Accept", api.EventStreamMediaType)
	req.Header.Set("Connection", api.KeepAlive)
	resp, err := h.httpClient.Do(req)
	if err != nil {
		eventsChannel <- &Event{
			EventType: EventConnectionError,
			Data:      []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
		}
	}

	defer func() {
		if closeErr := resp.Body.Close(); closeErr != nil {
			log.WithError(closeErr).Error("Failed to close events response body")
		}
	}()
	// Create a new scanner to read lines from the response body
	scanner := bufio.NewScanner(resp.Body)

	var eventType, data string // Variables to store event type and data

	// Iterate over lines of the event stream
	for scanner.Scan() {
		select {
		case <-h.ctx.Done():
			log.Info("Context canceled, stopping event stream")
			close(eventsChannel)
			return
		default:
			line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
			// Handle the event based on your specific format
			if line == "" {
				// Empty line indicates the end of an event
				if eventType != "" && data != "" {
					// Process the event when both eventType and data are set
					eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
				}

				// Reset eventType and data for the next event
				eventType, data = "", ""
				continue
			}
			et, ok := strings.CutPrefix(line, "event: ")
			if ok {
				// Extract event type from the "event" field
				eventType = et
			}
			d, ok := strings.CutPrefix(line, "data: ")
			if ok {
				// Extract data from the "data" field
				data = d
			}
		}
	}

	if err := scanner.Err(); err != nil {
		eventsChannel <- &Event{
			EventType: EventConnectionError,
			Data:      []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
		}
	}
}
```
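A usage sketch for the stream above, assuming a beacon node serving the events endpoint on localhost:3500; the host, channel size, and printing loop are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v5/api/client/event"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, err := event.NewEventStream(ctx, http.DefaultClient, "http://localhost:3500", event.DefaultEventTopics)
	if err != nil {
		fmt.Println("failed to create stream:", err)
		return
	}

	events := make(chan *event.Event, 16)
	go stream.Subscribe(events)

	// Print head events until the stream reports a connection error.
	for ev := range events {
		if ev.EventType == event.EventConnectionError {
			fmt.Println("stream error:", string(ev.Data))
			return
		}
		fmt.Printf("%s: %s\n", ev.EventType, ev.Data)
	}
}
```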
api/client/event/event_stream_test.go (new file, 80 lines)

```go
package event

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/v5/testing/require"
	log "github.com/sirupsen/logrus"
)

func TestNewEventStream(t *testing.T) {
	validURL := "http://localhost:8080"
	invalidURL := "://invalid"
	topics := []string{"topic1", "topic2"}

	tests := []struct {
		name    string
		host    string
		topics  []string
		wantErr bool
	}{
		{"Valid input", validURL, topics, false},
		{"Invalid URL", invalidURL, topics, true},
		{"No topics", validURL, []string{}, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
			if (err != nil) != tt.wantErr {
				t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

func TestEventStream(t *testing.T) {
	mux := http.NewServeMux()
	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
		flusher, ok := w.(http.Flusher)
		require.Equal(t, true, ok)
		for i := 1; i <= 2; i++ {
			_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
			require.NoError(t, err)
			flusher.Flush()                    // Trigger flush to simulate streaming data
			time.Sleep(100 * time.Millisecond) // Simulate delay between events
		}
	})
	server := httptest.NewServer(mux)
	defer server.Close()

	topics := []string{"head"}
	eventsChannel := make(chan *Event, 1)
	stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
	require.NoError(t, err)
	go stream.Subscribe(eventsChannel)

	// Collect events
	var events []*Event

	for len(events) != 2 {
		select {
		case event := <-eventsChannel:
			log.Info(event)
			events = append(events, event)
		}
	}

	// Assertions to verify the events content
	expectedData := []string{"data1", "data2"}
	for i, event := range events {
		if string(event.Data) != expectedData[i] {
			t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
		}
	}
}
```
```diff
@@ -563,3 +563,9 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
 func (s *Service) inRegularSync() bool {
 	return s.cfg.SyncChecker.Synced()
 }
+
+// validating returns true if the beacon is tracking some validators that have
+// registered for proposing.
+func (s *Service) validating() bool {
+	return s.cfg.TrackedValidatorsCache.Validating()
+}
```
```diff
@@ -82,19 +82,20 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
 	if level >= logrus.DebugLevel {
 		parentRoot := block.ParentRoot()
 		lf := logrus.Fields{
-			"slot":                      block.Slot(),
-			"slotInEpoch":               block.Slot() % params.BeaconConfig().SlotsPerEpoch,
-			"block":                     fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-			"epoch":                     slots.ToEpoch(block.Slot()),
-			"justifiedEpoch":            justified.Epoch,
-			"justifiedRoot":             fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
-			"finalizedEpoch":            finalized.Epoch,
-			"finalizedRoot":             fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-			"parentRoot":                fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
-			"version":                   version.String(block.Version()),
-			"sinceSlotStartTime":        prysmTime.Now().Sub(startTime),
-			"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
-			"deposits":                  len(block.Body().Deposits()),
+			"slot":                       block.Slot(),
+			"slotInEpoch":                block.Slot() % params.BeaconConfig().SlotsPerEpoch,
+			"block":                      fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+			"epoch":                      slots.ToEpoch(block.Slot()),
+			"justifiedEpoch":             justified.Epoch,
+			"justifiedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
+			"finalizedEpoch":             finalized.Epoch,
+			"finalizedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+			"parentRoot":                 fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
+			"version":                    version.String(block.Version()),
+			"sinceSlotStartTime":         prysmTime.Now().Sub(startTime),
+			"chainServiceProcessedTime":  prysmTime.Now().Sub(receivedTime) - daWaitedTime,
+			"dataAvailabilityWaitedTime": daWaitedTime,
+			"deposits":                   len(block.Body().Deposits()),
 		}
 		log.WithFields(lf).Debug("Synced new block")
 	} else {
```
```diff
@@ -18,17 +18,63 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )
 
+func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
+	headEpoch := slots.ToEpoch(s.HeadSlot())
+	if c.Epoch < headEpoch {
+		return nil
+	}
+	if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
+		return nil
+	}
+	if c.Epoch == headEpoch {
+		targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
+		if err != nil {
+			return nil
+		}
+		if slots.ToEpoch(targetSlot)+1 < headEpoch {
+			return nil
+		}
+		st, err := s.HeadStateReadOnly(ctx)
+		if err != nil {
+			return nil
+		}
+		return st
+	}
+	slot, err := slots.EpochStart(c.Epoch)
+	if err != nil {
+		return nil
+	}
+	// Try if we have already set the checkpoint cache
+	epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
+	lock := async.NewMultilock(string(c.Root) + epochKey)
+	lock.Lock()
+	defer lock.Unlock()
+	cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
+	if err != nil {
+		return nil
+	}
+	if cachedState != nil && !cachedState.IsNil() {
+		return cachedState
+	}
+	st, err := s.HeadState(ctx)
+	if err != nil {
+		return nil
+	}
+	st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
+	if err != nil {
+		return nil
+	}
+	if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
+		return nil
+	}
+	return st
+}
+
 // getAttPreState retrieves the att pre state by either from the cache or the DB.
 func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
-	// If the attestation is recent and canonical we can use the head state to compute the shuffling.
-	headEpoch := slots.ToEpoch(s.HeadSlot())
-	if c.Epoch == headEpoch {
-		targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
-		if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
-			if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
-				return s.HeadStateReadOnly(ctx)
-			}
-		}
+	if st := s.getRecentPreState(ctx, c); st != nil {
+		return st, nil
 	}
 	// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
 	// allowing us to behave smarter in terms of how this function is used concurrently.
```
```diff
@@ -146,6 +146,28 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
 	require.NoError(t, service.OnAttestation(ctx, att[0], 0))
 }
 
+func TestService_GetRecentPreState(t *testing.T) {
+	service, _ := minimalTestService(t)
+	ctx := context.Background()
+
+	s, err := util.NewBeaconState()
+	require.NoError(t, err)
+	ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
+	cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
+	err = s.SetFinalizedCheckpoint(cp0)
+	require.NoError(t, err)
+
+	st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
+	require.NoError(t, err)
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
+	service.head = &head{
+		root:  [32]byte(ckRoot),
+		state: s,
+		slot:  31,
+	}
+	require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
+}
+
 func TestService_GetAttPreState_Concurrency(t *testing.T) {
 	service, _ := minimalTestService(t)
 	ctx := context.Background()
```
```diff
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -558,6 +559,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 	// The gossip handler for blobs writes the index of each verified blob referencing the given
 	// root to the channel returned by blobNotifiers.forRoot.
 	nc := s.blobNotifiers.forRoot(root)
+
+	// Log for DA checks that cross over into the next slot; helpful for debugging.
+	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
+	// Avoid logging if DA check is called after next slot start.
+	if nextSlot.After(time.Now()) {
+		nst := time.AfterFunc(time.Until(nextSlot), func() {
+			if len(missing) == 0 {
+				return
+			}
+			log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
+				Error("Still waiting for DA check at slot end.")
+		})
+		defer nst.Stop()
+	}
 	for {
 		select {
 		case idx := <-nc:
@@ -571,11 +586,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 			s.blobNotifiers.delete(root)
 			return nil
 		case <-ctx.Done():
-			return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
+			return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
 		}
 	}
 }
 
+func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
+	return logrus.Fields{
+		"slot":          slot,
+		"root":          fmt.Sprintf("%#x", root),
+		"blobsExpected": expected,
+		"blobsWaiting":  missing,
+	}
+}
+
 // lateBlockTasks is called 4 seconds into the slot and performs tasks
 // related to late blocks. It emits a MissedSlot state feed event.
 // It calls FCU and sets the right attributes if we are proposing next slot
```
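The one-shot warning above uses `time.AfterFunc` with a deferred `Stop`: the timer fires only if the wait outlives the slot boundary and is cancelled otherwise. A stripped-down sketch of that pattern; the names here are illustrative, not Prysm APIs:

```go
package main

import (
	"fmt"
	"time"
)

// waitWithDeadlineLog waits for work to finish and logs once if the wait
// crosses the deadline; the timer is cancelled when the function returns.
func waitWithDeadlineLog(done <-chan struct{}, deadline time.Time) {
	if deadline.After(time.Now()) {
		t := time.AfterFunc(time.Until(deadline), func() {
			fmt.Println("still waiting at deadline")
		})
		defer t.Stop() // No-op if the timer already fired.
	}
	<-done
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(150 * time.Millisecond) // Simulated slow work.
		close(done)
	}()
	waitWithDeadlineLog(done, time.Now().Add(100*time.Millisecond))
	fmt.Println("work finished")
}
```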
```diff
@@ -95,7 +95,9 @@ func (s *Service) spawnProcessAttestationsRoutine() {
 				return
 			case slotInterval := <-ticker.C():
 				if slotInterval.Interval > 0 {
-					s.UpdateHead(s.ctx, slotInterval.Slot+1)
+					if s.validating() {
+						s.UpdateHead(s.ctx, slotInterval.Slot+1)
+					}
 				} else {
 					s.cfg.ForkChoiceStore.Lock()
 					if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
```
```diff
@@ -290,18 +290,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
-	if params.BeaconConfig().ConfigName != params.PraterName {
-		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
-			Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
-			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
-		}
-	} else {
-		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
-			Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
-			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
-		}
+	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
+		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
+		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
 	}
 
 	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
 		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
 		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
```
beacon-chain/cache/skip_slot_cache.go (vendored, 8 lines changed)

```diff
@@ -109,10 +109,6 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
 // MarkInProgress a request so that any other similar requests will block on
 // Get until MarkNotInProgress is called.
 func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
-	if c.disabled {
-		return nil
-	}
-
 	c.lock.Lock()
 	defer c.lock.Unlock()
@@ -126,10 +122,6 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
 // MarkNotInProgress will release the lock on a given request. This should be
 // called after put.
 func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
-	if c.disabled {
-		return
-	}
-
 	c.lock.Lock()
 	defer c.lock.Unlock()
```
beacon-chain/cache/skip_slot_cache_test.go (vendored, 26 lines changed)

```diff
@@ -2,6 +2,7 @@ package cache_test
 
 import (
 	"context"
+	"sync"
 	"testing"
 
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -35,3 +36,28 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
 	require.NoError(t, err)
 	assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
 }
+
+func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
+	ctx := context.Background()
+	c := cache.NewSkipSlotCache()
+
+	r := [32]byte{'a'}
+	c.Disable()
+
+	require.NoError(t, c.MarkInProgress(r))
+
+	c.Enable()
+	wg := new(sync.WaitGroup)
+	wg.Add(1)
+	go func() {
+		// Get call will only terminate when
+		// it is no longer in progress.
+		obj, err := c.Get(ctx, r)
+		require.NoError(t, err)
+		require.IsNil(t, obj)
+		wg.Done()
+	}()
+
+	c.MarkNotInProgress(r)
+	wg.Wait()
+}
```
beacon-chain/cache/tracked_validators.go (vendored, 6 lines changed)

```diff
@@ -41,3 +41,9 @@ func (t *TrackedValidatorsCache) Prune() {
 	defer t.Unlock()
 	t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
 }
+
+func (t *TrackedValidatorsCache) Validating() bool {
+	t.Lock()
+	defer t.Unlock()
+	return len(t.trackedValidators) > 0
+}
```
```diff
@@ -14,6 +14,7 @@ go_library(
     visibility = [
         "//beacon-chain:__subpackages__",
         "//testing/spectest:__subpackages__",
+        "//tools:__subpackages__",
     ],
     deps = [
         "//beacon-chain/core/helpers:go_default_library",
```
```diff
@@ -224,7 +224,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
 	return s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
 		if b := bkt.Get(root[:]); b != nil {
-			return ErrDeleteJustifiedAndFinalized
+			return ErrDeleteFinalized
 		}
 
 		if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
```
```diff
@@ -289,7 +289,7 @@ func TestStore_DeleteBlock(t *testing.T) {
 	require.Equal(t, b, nil)
 	require.Equal(t, false, db.HasStateSummary(ctx, root2))
 
-	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
+	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
 }
 
 func TestStore_DeleteJustifiedBlock(t *testing.T) {
@@ -309,7 +309,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) {
 	require.NoError(t, db.SaveBlock(ctx, blk))
 	require.NoError(t, db.SaveState(ctx, st, root))
 	require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
-	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
+	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
 }
 
 func TestStore_DeleteFinalizedBlock(t *testing.T) {
@@ -329,7 +329,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
 	require.NoError(t, db.SaveState(ctx, st, root))
 	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 	require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
-	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
+	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
 }
 func TestStore_GenesisBlock(t *testing.T) {
 	db := setupDB(t)
```
```diff
@@ -2,8 +2,8 @@ package kv
 
 import "github.com/pkg/errors"
 
-// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
-var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
+// ErrDeleteFinalized is raised when we attempt to delete a finalized block/state
+var ErrDeleteFinalized = errors.New("cannot delete finalized block or state")
 
 // ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
 // indicate that a value couldn't be found.
```
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -29,72 +28,76 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
|
||||
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
|
||||
// finalized epoch from being indexed as "final and canonical".
|
||||
//
|
||||
// The algorithm for building the index works as follows:
|
||||
// - De-index all finalized beacon block roots from previous_finalized_epoch to
|
||||
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
|
||||
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
|
||||
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
|
||||
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
|
||||
//
|
||||
// This method ensures that all blocks from the current finalized epoch are considered "final" while
|
||||
// maintaining only canonical and finalized blocks older than the current finalized epoch.
|
||||
// The main part of the algorithm traverses parent->child block relationships in the
|
||||
// `blockParentRootIndicesBucket` bucket to find the path between the last finalized checkpoint
|
||||
// and the current finalized checkpoint. It relies on the invariant that there is a unique path
|
||||
// between two finalized checkpoints.
|
||||
func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
|
||||
defer span.End()
|
||||
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
|
||||
root := checkpoint.Root
|
||||
var previousRoot []byte
|
||||
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
|
||||
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
|
||||
|
||||
// De-index recent finalized block roots, to be re-indexed.
|
||||
finalizedBkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
previousFinalizedCheckpoint := ðpb.Checkpoint{}
|
||||
if b := bkt.Get(previousFinalizedCheckpointKey); b != nil {
|
||||
if b := finalizedBkt.Get(previousFinalizedCheckpointKey); b != nil {
|
||||
if err := decode(ctx, b, previousFinalizedCheckpoint); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
|
||||
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
|
||||
SetEndEpoch(checkpoint.Epoch+1),
|
||||
)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
for _, root := range blockRoots {
|
||||
if err := bkt.Delete(root[:]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
|
||||
// index bucket or genesis block root.
|
||||
for {
|
||||
if bytes.Equal(root, genesisRoot) {
|
||||
break
|
||||
}
|
||||
|
||||
signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
|
||||
// Handle the case of checkpoint sync.
|
||||
if previousFinalizedCheckpoint.Root == nil && bytes.Equal(checkpoint.Root, tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)) {
|
||||
container := ðpb.FinalizedBlockRootContainer{}
|
||||
enc, err := encode(ctx, container)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(signedBlock); err != nil {
|
||||
if err = finalizedBkt.Put(checkpoint.Root, enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
block := signedBlock.Block()
|
||||
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
|
||||
}
|
||||
|
||||
parentRoot := block.ParentRoot()
|
||||
container := ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: parentRoot[:],
|
||||
ChildRoot: previousRoot,
|
||||
var finalized [][]byte
|
||||
if previousFinalizedCheckpoint.Root == nil {
|
||||
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
|
||||
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{genesisRoot}, checkpoint.Root, tx)
|
||||
} else {
|
||||
if err := updateChildOfPrevFinalizedCheckpoint(
|
||||
ctx,
|
||||
span,
|
||||
finalizedBkt,
|
||||
tx.Bucket(blockParentRootIndicesBucket), previousFinalizedCheckpoint.Root,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{previousFinalizedCheckpoint.Root}, checkpoint.Root, tx)
|
||||
}
|
||||
|
||||
for i, r := range finalized {
|
||||
var container *ethpb.FinalizedBlockRootContainer
|
||||
switch i {
|
||||
case 0:
|
||||
container = ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: previousFinalizedCheckpoint.Root,
|
||||
}
|
||||
if len(finalized) > 1 {
|
||||
container.ChildRoot = finalized[i+1]
|
||||
}
|
||||
case len(finalized) - 1:
|
||||
// We don't know the finalized child of the new finalized checkpoint.
|
||||
// It will be filled out in the next function call.
|
||||
container = ðpb.FinalizedBlockRootContainer{}
|
||||
if len(finalized) > 1 {
|
||||
container.ParentRoot = finalized[i-1]
|
||||
}
|
||||
default:
|
||||
container = ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: finalized[i-1],
|
||||
ChildRoot: finalized[i+1],
|
||||
}
|
||||
}
|
||||
|
||||
enc, err := encode(ctx, container)
|
||||
@@ -102,66 +105,13 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := bkt.Put(root, enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// breaking here allows the initial checkpoint root to be correctly inserted,
|
||||
// but stops the loop from trying to search for its parent.
|
||||
if bytes.Equal(root, initCheckpointRoot) {
|
||||
break
|
||||
}
|
||||
|
||||
// Found parent, loop exit condition.
|
||||
pr := block.ParentRoot()
|
||||
if parentBytes := bkt.Get(pr[:]); parentBytes != nil {
|
||||
parent := ðpb.FinalizedBlockRootContainer{}
|
||||
if err := decode(ctx, parentBytes, parent); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
parent.ChildRoot = root
|
||||
enc, err := encode(ctx, parent)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := bkt.Put(pr[:], enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
previousRoot = root
|
||||
root = pr[:]
|
||||
}
|
||||
|
||||
// Upsert blocks from the current finalized epoch.
|
||||
roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
for _, root := range roots {
|
||||
root := root[:]
|
||||
if bytes.Equal(root, checkpoint.Root) || bkt.Get(root) != nil {
|
||||
continue
|
||||
}
|
||||
if err := bkt.Put(root, containerFinalizedButNotCanonical); err != nil {
|
||||
if err = finalizedBkt.Put(r, enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update previous checkpoint
|
||||
enc, err := encode(ctx, checkpoint)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return bkt.Put(previousFinalizedCheckpointKey, enc)
|
||||
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
|
||||
}
|
||||
|
||||
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
|
||||
@@ -201,21 +151,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
|
||||
return err
|
||||
}
|
||||
encs[i-1] = penc
|
||||
|
||||
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
|
||||
// the parent_root value stored in the index data for finalizedChildRoot.
|
||||
if i == len(blocks)-1 {
|
||||
fbrs[i].ChildRoot = finalizedChildRoot[:]
|
||||
// Final element is complete, so it is pre-encoded like the others.
|
||||
enc, err := encode(ctx, fbrs[i])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
encs[i] = enc
|
||||
}
|
||||
}
|
||||
|
||||
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
|
||||
// the parent_root value stored in the index data for finalizedChildRoot.
|
||||
lastIdx := len(blocks) - 1
|
||||
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
|
||||
// Final element is complete, so it is pre-encoded like the others.
|
||||
enc, err := encode(ctx, fbrs[lastIdx])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
encs[lastIdx] = enc
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
child := bkt.Get(finalizedChildRoot[:])
|
||||
@@ -243,8 +192,6 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
|
||||
|
||||
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
|
||||
// A beacon block root contained exists in this index if it is considered finalized and canonical.
|
||||
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
|
||||
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()
@@ -297,3 +244,53 @@ func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (in
tracing.AnnotateError(span, err)
return blk, err
}

func pathToFinalizedCheckpoint(ctx context.Context, roots [][]byte, checkpointRoot []byte, tx *bolt.Tx) (bool, [][]byte) {
if len(roots) == 0 || (len(roots) == 1 && roots[0] == nil) {
return false, nil
}

for _, r := range roots {
if bytes.Equal(r, checkpointRoot) {
return true, [][]byte{r}
}
children := lookupValuesForIndices(ctx, map[string][]byte{string(blockParentRootIndicesBucket): r}, tx)
if len(children) == 0 {
children = [][][]byte{nil}
}
isPath, path := pathToFinalizedCheckpoint(ctx, children[0], checkpointRoot, tx)
if isPath {
return true, append([][]byte{r}, path...)
}
}

return false, nil
}

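The recursive helper above is a depth-first search over the block tree: starting from the children of the previous checkpoint, it either hits the new checkpoint root or recurses into each child's children. A minimal, self-contained sketch of the same walk, using a plain map in place of the bolt parent-root index (all names here are illustrative, not Prysm APIs):

package main

import "fmt"

// findPath reports whether target is reachable from root by following child
// links, returning the chain of visited nodes when it is.
func findPath(children map[string][]string, root, target string) (bool, []string) {
	if root == target {
		return true, []string{root}
	}
	for _, c := range children[root] {
		if ok, path := findPath(children, c, target); ok {
			return true, append([]string{root}, path...)
		}
	}
	return false, nil
}

func main() {
	children := map[string][]string{"a": {"b"}, "b": {"c", "d"}, "d": {"e"}}
	ok, path := findPath(children, "a", "e")
	fmt.Println(ok, path) // true [a b d e]
}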
func updatePrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt *bolt.Bucket, checkpoint *ethpb.Checkpoint) error {
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return finalizedBkt.Put(previousFinalizedCheckpointKey, enc)
}

func updateChildOfPrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt, parentBkt *bolt.Bucket, checkpointRoot []byte) error {
container := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, finalizedBkt.Get(checkpointRoot), container); err != nil {
tracing.AnnotateError(span, err)
return err
}
container.ChildRoot = parentBkt.Get(checkpointRoot)
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(checkpointRoot, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
return nil
}

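Taken together, these helpers maintain what is effectively a doubly linked list keyed by block root: each FinalizedBlockRootContainer records its parent's root and, once known, its child's root. A hedged sketch of walking such an index forward, with a plain struct and map standing in for the protobuf container and the bolt bucket:

package main

import "fmt"

// container mirrors the parent/child links of the finalized index entry.
type container struct {
	parentRoot string
	childRoot  string
}

func main() {
	index := map[string]container{
		"a": {childRoot: "b"},
		"b": {parentRoot: "a", childRoot: "c"},
		"c": {parentRoot: "b"},
	}
	// Walk from the oldest entry toward the most recently finalized child.
	for root := "a"; root != ""; root = index[root].childRoot {
		fmt.Println(root)
	}
}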
@@ -26,38 +26,30 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
ctx := context.Background()

require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))

blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))

root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)

cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}

st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

// All blocks up to slotsPerEpoch*2 should be in the finalized index.
for i := uint64(0); i < slotsPerEpoch*2; i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := slotsPerEpoch + 1; i < uint64(len(blks)); i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized in the index, but should not have", i)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized, but should not have", i)
}
}

func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

@@ -69,136 +61,114 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}

// This test scenario covers a specific edge case where the finalized block root is not part of
// the finalized and canonical chain.
//
// Example:
//  0    1    2    3    4    5    6    slot
//  a <- b <------ d <- e <- f <- g    roots
//       ^- c
//
// Imagine that epochs are 2 slots long and that epochs 1, 2, and 3 are finalized. Checkpoint roots
// would be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon
// it, so it should not be considered "final and canonical" in the view at slot 6.
func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
blocks1 := append(
makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in the epoch.
makeBlocks(t, slotsPerEpoch*1+1, slotsPerEpoch-1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1])))...,
)
blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1])))

db := setupDB(t)
ctx := context.Background()

require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
require.NoError(t, db.SaveBlocks(ctx, blocks0))
require.NoError(t, db.SaveBlocks(ctx, blocks1))
require.NoError(t, db.SaveBlocks(ctx, blocks2))

// First checkpoint
checkpoint1 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks1[0]),
Epoch: 1,
}

st, err := util.NewBeaconState()
require.NoError(t, err)
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint1))
// All blocks in blocks0 and blocks1 should be finalized and canonical.
for i, block := range append(blocks0, blocks1...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}

// Second checkpoint
checkpoint2 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks2[0]),
Epoch: 2,
}
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint2.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint2))
// All blocks in blocks0 and blocks2 should be finalized and canonical.
for i, block := range append(blocks0, blocks2...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// All blocks in blocks1 should be finalized and canonical, except blocks1[0].
for i, block := range blocks1 {
root := sszRootOrDie(t, block)
if db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)) == (i == 0) {
t.Errorf("Expected db.IsFinalizedBlock(ctx, blocks1[%d]) to be %v", i, i != 0)
}
}
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root))
}

func TestStore_IsFinalizedChildBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))

eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)

cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}

st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

// All blocks up to slotsPerEpoch should have a finalized child block.
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
if blk == nil {
t.Error("Child block doesn't exist for valid finalized block.")
}
}
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

setup := func(t testing.TB) *Store {
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))

return db
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
assert.Equal(t, false, blk == nil, "Child block at index %d was not considered finalized", i)
}

t.Run("phase0", func(t *testing.T) {
db := setup(t)

blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})

t.Run("altair", func(t *testing.T) {
db := setup(t)

blks := makeBlocksAltair(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
}

func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
root, err := block.Block().HashTreeRoot()
func TestStore_ChildRootOfPrevFinalizedCheckpointIsUpdated(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))

blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
return root[:]
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
root2, err := blks[slotsPerEpoch*2].Block().HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{
Epoch: 2,
Root: root2[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
container := &ethpb.FinalizedBlockRootContainer{}
f := tx.Bucket(finalizedBlockRootsIndexBucket).Get(root[:])
require.NoError(t, decode(ctx, f, container))
r, err := blks[slotsPerEpoch+1].Block().HashTreeRoot()
require.NoError(t, err)
assert.DeepEqual(t, r[:], container.ChildRoot)
return nil
}))
}

func TestStore_OrphanedBlockIsNotFinalized(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()

require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blk0 := util.NewBeaconBlock()
blk0.Block.ParentRoot = genesisBlockRoot[:]
blk0Root, err := blk0.Block.HashTreeRoot()
require.NoError(t, err)
blk1 := util.NewBeaconBlock()
blk1.Block.Slot = 1
blk1.Block.ParentRoot = blk0Root[:]
blk2 := util.NewBeaconBlock()
blk2.Block.Slot = 2
// orphan block at index 1
blk2.Block.ParentRoot = blk0Root[:]
blk2Root, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
sBlk0, err := consensusblocks.NewSignedBeaconBlock(blk0)
require.NoError(t, err)
sBlk1, err := consensusblocks.NewSignedBeaconBlock(blk1)
require.NoError(t, err)
sBlk2, err := consensusblocks.NewSignedBeaconBlock(blk2)
require.NoError(t, err)
blks := append([]interfaces.ReadOnlySignedBeaconBlock{sBlk0, sBlk1, sBlk2}, makeBlocks(t, 3, slotsPerEpoch*2-3, blk2Root)...)
require.NoError(t, db.SaveBlocks(ctx, blks))

root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
if i == 1 {
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index 1 was considered finalized, but should not have")
} else {
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
}
}

func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
@@ -219,22 +189,48 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.R
return ifaceBlocks
}

func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
for j := startIdx; j < num+startIdx; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])
blocks[j-startIdx] = util.NewBeaconBlockAltair()
blocks[j-startIdx].Block.Slot = primitives.Slot(j + 1)
blocks[j-startIdx].Block.ParentRoot = parentRoot
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = consensusblocks.NewSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)

// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]

// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]

// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
return ifaceBlocks
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)

require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))

// make sure that we still correctly validate descendants in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)

// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}

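The single-item test above exercises the core invariant of BackfillFinalizedIndex: the batch must form a contiguous parent-to-child chain whose last element is the parent of an already-indexed root. A minimal sketch of that contiguity check under assumed types (isContiguous is illustrative, not a Prysm function):

package main

import "fmt"

// roBlock stands in for a read-only block exposing its root and parent root.
type roBlock struct {
	root       string
	parentRoot string
}

// isContiguous reports whether each block's parent is the preceding block.
func isContiguous(blocks []roBlock) bool {
	for i := 1; i < len(blocks); i++ {
		if blocks[i].parentRoot != blocks[i-1].root {
			return false
		}
	}
	return true
}

func main() {
	batch := []roBlock{{root: "a"}, {root: "b", parentRoot: "a"}, {root: "c", parentRoot: "b"}}
	fmt.Println(isContiguous(batch)) // true
}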
func TestStore_BackfillFinalizedIndex(t *testing.T) {
@@ -252,23 +248,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)

// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendant root that really doesn't exist
require.NoError(t, err)

disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))

// use the real root so that it succeeds

@@ -458,7 +458,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(stateBucket)
// Safeguard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
return ErrDeleteFinalized
}

// Nothing to delete if state doesn't exist.

@@ -7,9 +7,11 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -127,51 +129,16 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureTracing(cliCtx); err != nil {
return nil, err
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()

// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()

registry := runtime.NewServiceRegistry()

ctx := cliCtx.Context

beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -191,10 +158,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
}

beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -203,8 +170,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer

beacon.forkChoicer = doublylinkedtree.New()

depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
@@ -220,112 +187,29 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.BlobStorage = blobs
}

log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
return nil, errors.Wrap(err, "could not start modules")
}

beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)

pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))

beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)

bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}

log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}

log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}

log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}

log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}

log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}

log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}

log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}

log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}

if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
}

// db.DatabasePath is the path to the containing directory
@@ -343,6 +227,170 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco

return beacon, nil
}

func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}

prereqs.WarnIfPlatformNotSupported(cliCtx.Context)

if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}

if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}

if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}

flags.ConfigureGlobalFlags(cliCtx)

if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}

if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}

if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}

if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}

if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}

configureNetwork(cliCtx)

if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}

if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}

configureFastSSZHashingAlgorithm()

return nil
}

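configureBeacon is a fail-fast pipeline: each step either mutates global configuration or aborts startup with a wrapped error. The same shape can be sketched generically (the step closures below are placeholders, not the real configure* functions):

package main

import (
	"errors"
	"fmt"
)

// configureAll runs each step in order and stops at the first failure.
func configureAll(steps ...func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			return fmt.Errorf("configuration failed: %w", err)
		}
	}
	return nil
}

func main() {
	err := configureAll(
		func() error { return nil },                    // e.g. tracing
		func() error { return errors.New("bad flag") }, // e.g. chain config
	)
	fmt.Println(err) // configuration failed: bad flag
}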
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}

return bfs, nil
}

func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}

log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}

log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}

log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}

log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}

log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}

log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}

log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}

log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}

log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}

if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}

return nil
}

func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -431,40 +479,86 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false

if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"

deniedText = "Database will not be deleted. No changes have been made."
)

clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}

if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}

if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}

d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}

return d, nil
}

func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}

addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}

if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}

return nil
}

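checkAndSaveDepositContract acts as a one-way latch: the first run persists the configured address, and every later run must match it, which catches pointing an existing database at a different network. A dependency-free sketch of that guard (the byte values are illustrative):

package main

import (
	"bytes"
	"fmt"
)

// checkContract allows a first run to proceed and rejects later mismatches.
func checkContract(known, configured []byte) error {
	if len(known) == 0 {
		return nil // first run: caller persists the configured address
	}
	if !bytes.Equal(known, configured) {
		return fmt.Errorf("database contract is %#x but tried to run with %#x", known, configured)
	}
	return nil
}

func main() {
	fmt.Println(checkContract([]byte{0xaa}, []byte{0xbb})) // mismatch error
}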
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache

baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
return errors.Wrapf(err, "could not create database at %s", dbPath)
}
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)

if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}

if err := d.RunMigrations(b.ctx); err != nil {
@@ -473,7 +567,6 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

b.db = d

var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
@@ -488,16 +581,17 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}

return errors.Wrap(err, "could not load genesis from file")
}
}

if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return err
return errors.Wrap(err, "could not ensure embedded genesis")
}

if b.CheckpointInitializer != nil {
@@ -506,23 +600,11 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}

knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())

log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}

@@ -610,31 +692,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return err
return errors.Wrapf(err, "could not register p2p service")
}

svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -976,11 +1058,13 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)

selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
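The switch from fmt.Sprintf("%s:%d", ...) to net.JoinHostPort above matters for IPv6 literals, which must be bracketed inside host:port strings. A minimal demonstration:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("127.0.0.1", "4000")) // 127.0.0.1:4000
	fmt.Println(net.JoinHostPort("::1", "4000"))       // [::1]:4000
	fmt.Printf("%s:%d\n", "::1", 4000)                 // ::1:4000 (ambiguous with the port)
}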
@@ -1074,9 +1158,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}

opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))

// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())

@@ -11,6 +11,7 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",

@@ -4,6 +4,7 @@ import (
"os"
"path/filepath"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/urfave/cli/v2"
@@ -31,9 +32,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
log.Fatal(
"Could not determine your system's HOME path, please specify a --datadir you wish " +
"to use for your chain data",
err = errors.Errorf(
"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
cmd.DataDirFlag.Name,
)
}
}

@@ -49,6 +49,7 @@ go_test(
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// pruneAttsPool prunes the attestation pool on every slot interval.
@@ -66,7 +67,18 @@ func (s *Service) pruneExpiredAtts() {

// Returns true if the input slot has expired.
// Expired is defined as one epoch behind the current time.
func (s *Service) expired(slot primitives.Slot) bool {
func (s *Service) expired(providedSlot primitives.Slot) bool {
providedEpoch := slots.ToEpoch(providedSlot)
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch < params.BeaconConfig().DenebForkEpoch {
return s.expiredPreDeneb(providedSlot)
}
return providedEpoch+1 < currEpoch
}

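After this change, post-Deneb expiry is computed purely in epochs: an attestation slot is expired once its epoch is older than the previous epoch. A dependency-free sketch of that rule (slotsPerEpoch here is a stand-in for the real config value):

package main

import "fmt"

const slotsPerEpoch = 32

// expiredPostDeneb keeps the current and previous epochs, expiring the rest.
func expiredPostDeneb(slot, currSlot uint64) bool {
	return slot/slotsPerEpoch+1 < currSlot/slotsPerEpoch
}

func main() {
	fmt.Println(expiredPostDeneb(0, 64))  // true: epoch 0 vs. epoch 2
	fmt.Println(expiredPostDeneb(32, 64)) // false: the previous epoch is retained
}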
// Handles expiration of attestations before Deneb.
func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + uint64(expirationSlot.Mul(params.BeaconConfig().SecondsPerSlot))
currentTime := uint64(prysmTime.Now().Unix())

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -127,3 +128,22 @@ func TestPruneExpired_Expired(t *testing.T) {
assert.Equal(t, true, s.expired(0), "Should be expired")
assert.Equal(t, false, s.expired(1), "Should not be expired")
}

func TestPruneExpired_ExpiredDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = 3
params.OverrideBeaconConfig(cfg)

s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

// Rewind back 4 epochs + 10 slots worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - (4*uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) + 10)
secondEpochStart := primitives.Slot(2 * uint64(params.BeaconConfig().SlotsPerEpoch))
thirdEpochStart := primitives.Slot(3 * uint64(params.BeaconConfig().SlotsPerEpoch))

assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")

}

@@ -94,6 +94,7 @@ go_library(
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -68,7 +69,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
go s.internalBroadcastAttestation(ctx, subnet, att, forkDigest)

return nil
}
@@ -94,8 +95,8 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}

func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -137,7 +138,10 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).Warning("Attestation is too old to broadcast, discarding it")
return
}

@@ -218,13 +222,13 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
go s.internalBroadcastBlob(ctx, subnet, blob, forkDigest)

return nil
}

func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.


@@ -240,9 +240,8 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
var hosts []host.Host
// setup other nodes.
cfg = &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {

@@ -12,28 +12,27 @@ const defaultPubsubQueueSize = 600
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
BootstrapNodeAddr []string
Discv5BootStrapAddr []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
}

// validateConfig validates whether the values provided are accurate and will set

@@ -25,7 +25,7 @@ const (
)

// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (_ *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}

@@ -63,12 +63,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {

// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (_ *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
func (*Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
return true
}

// InterceptUpgraded tests whether a fully capable connection is allowed.
func (_ *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
func (*Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

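The receiver change from (_ *Service) to (*Service) in these interceptors is purely cosmetic: Go allows a method's receiver name to be omitted entirely when the body never uses it. A minimal illustration (the method name simply mirrors the diff):

package main

import "fmt"

type Service struct{}

// An unused receiver may drop its name altogether.
func (*Service) InterceptPeerDial() bool { return true }

func main() {
	var s Service
	fmt.Println(s.InterceptPeerDial()) // true
}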
@@ -34,6 +34,11 @@ type Listener interface {
LocalNode() *enode.LocalNode
}

const (
udp4 = iota
udp6
)

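Replacing the "udp4"/"udp6" strings with iota constants lets the compiler catch a mistyped version where a string switch would silently fall through to the default case. A small sketch of the same pattern (zeroAddr is illustrative):

package main

import "fmt"

const (
	udp4 = iota
	udp6
)

// zeroAddr maps a UDP version constant to its wildcard bind address.
func zeroAddr(version int) string {
	switch version {
	case udp4:
		return "0.0.0.0"
	case udp6:
		return "::"
	default:
		return "invalid"
	}
}

func main() {
	fmt.Println(zeroAddr(udp6)) // ::
}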
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
@@ -62,8 +67,14 @@ func (s *Service) RefreshENR() {
|
||||
// Compare current epoch with our fork epochs
|
||||
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
|
||||
switch {
|
||||
// Altair Behaviour
|
||||
case currEpoch >= altairForkEpoch:
|
||||
case currEpoch < altairForkEpoch:
|
||||
// Phase 0 behaviour.
|
||||
if bytes.Equal(bitV, currentBitV) {
|
||||
// return early if bitfield hasn't changed
|
||||
return
|
||||
}
|
||||
s.updateSubnetRecordWithMetadata(bitV)
|
||||
default:
|
||||
// Retrieve sync subnets from application level
|
||||
// cache.
|
||||
bitS := bitfield.Bitvector4{byte(0x00)}
|
||||
@@ -82,13 +93,6 @@ func (s *Service) RefreshENR() {
|
||||
return
|
||||
}
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
default:
|
||||
// Phase 0 behaviour.
|
||||
if bytes.Equal(bitV, currentBitV) {
|
||||
// return early if bitfield hasn't changed
|
||||
return
|
||||
}
|
||||
s.updateSubnetRecordWithMetadata(bitV)
|
||||
}
|
||||
// ping all peers to inform them of new metadata
|
||||
s.pingPeers()
|
||||
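The reorder flips the switch so the pre-Altair path is the explicit case and everything from Altair onward falls through to default, meaning future forks inherit the V2 (attestation plus sync committee) record update without another case being added. A condensed sketch of the resulting control flow; the default branch's combined comparison against a currentBitS value is inferred from context, since the hunk shows only its tail:

	switch {
	case currEpoch < altairForkEpoch:
		// Phase 0: only attestation subnets are tracked.
		if bytes.Equal(bitV, currentBitV) {
			return // bitfield unchanged, nothing to refresh
		}
		s.updateSubnetRecordWithMetadata(bitV)
	default:
		// Altair and later: attestation and sync committee subnets.
		if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) {
			return
		}
		s.updateSubnetRecordWithMetadataV2(bitV, bitS)
	}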
@@ -140,9 +144,9 @@ func (s *Service) createListener(
	// by default we will listen to all interfaces.
	var bindIP net.IP
	switch udpVersionFromIP(ipAddr) {
-	case "udp4":
+	case udp4:
		bindIP = net.IPv4zero
-	case "udp6":
+	case udp6:
		bindIP = net.IPv6zero
	default:
		return nil, errors.New("invalid ip provided")
@@ -160,6 +164,7 @@ func (s *Service) createListener(
		IP:   bindIP,
		Port: int(s.cfg.UDPPort),
	}
+
	// Listen to all network interfaces
	// for both ip protocols.
	networkVersion := "udp"
@@ -177,44 +182,27 @@ func (s *Service) createListener(
	if err != nil {
		return nil, errors.Wrap(err, "could not create local node")
	}
-	if s.cfg.HostAddress != "" {
-		hostIP := net.ParseIP(s.cfg.HostAddress)
-		if hostIP.To4() == nil && hostIP.To16() == nil {
-			log.Errorf("Invalid host address given: %s", hostIP.String())
-		} else {
-			localNode.SetFallbackIP(hostIP)
-			localNode.SetStaticIP(hostIP)
-		}
-	}
-	if s.cfg.HostDNS != "" {
-		host := s.cfg.HostDNS
-		ips, err := net.LookupIP(host)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not resolve host address")
-		}
-		if len(ips) > 0 {
-			// Use first IP returned from the
-			// resolver.
-			firstIP := ips[0]
-			localNode.SetFallbackIP(firstIP)
-		}
-	}
-	dv5Cfg := discover.Config{
-		PrivateKey: privKey,
-	}
-	dv5Cfg.Bootnodes = []*enode.Node{}
-	for _, addr := range s.cfg.Discv5BootStrapAddr {
+
+	bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
+	for _, addr := range s.cfg.Discv5BootStrapAddrs {
		bootNode, err := enode.Parse(enode.ValidSchemes, addr)
		if err != nil {
			return nil, errors.Wrap(err, "could not bootstrap addr")
		}
-		dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)
+
+		bootNodes = append(bootNodes, bootNode)
	}
+
+	dv5Cfg := discover.Config{
+		PrivateKey: privKey,
+		Bootnodes:  bootNodes,
+	}
+
	listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
	if err != nil {
		return nil, errors.Wrap(err, "could not listen to discV5")
	}
+
	return listener, nil
}

@@ -242,8 +230,35 @@ func (s *Service) createLocalNode(
	if err != nil {
		return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
	}

	localNode = initializeAttSubnets(localNode)
-	return initializeSyncCommSubnets(localNode), nil
+	localNode = initializeSyncCommSubnets(localNode)
+
+	if s.cfg != nil && s.cfg.HostAddress != "" {
+		hostIP := net.ParseIP(s.cfg.HostAddress)
+		if hostIP.To4() == nil && hostIP.To16() == nil {
+			return nil, errors.Errorf("invalid host address: %s", s.cfg.HostAddress)
+		} else {
+			localNode.SetFallbackIP(hostIP)
+			localNode.SetStaticIP(hostIP)
+		}
+	}
+
+	if s.cfg != nil && s.cfg.HostDNS != "" {
+		host := s.cfg.HostDNS
+		ips, err := net.LookupIP(host)
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not resolve host address: %s", host)
+		}
+		if len(ips) > 0 {
+			// Use first IP returned from the
+			// resolver.
+			firstIP := ips[0]
+			localNode.SetFallbackIP(firstIP)
+		}
+	}
+
+	return localNode, nil
}

func (s *Service) startDiscoveryV5(
@@ -262,58 +277,69 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
-// 1. The local node is still actively looking for peers to
-//    connect to.
-// 2. Peer has a valid IP and TCP port set in their enr.
-// 3. Peer hasn't been marked as 'bad'
-// 4. Peer is not currently active or connected.
-// 5. Peer is ready to receive incoming connections.
-// 6. Peer's fork digest in their ENR matches that of
+// 1. Peer has a valid IP and TCP port set in their enr.
+// 2. Peer hasn't been marked as 'bad'.
+// 3. Peer is not currently active or connected.
+// 4. Peer is ready to receive incoming connections.
+// 5. Peer's fork digest in their ENR matches that of
//    our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
	// Ignore nil node entries passed in.
	if node == nil {
		return false
	}
-	// ignore nodes with no ip address stored.
+
+	// Ignore nodes with no IP address stored.
	if node.IP() == nil {
		return false
	}
-	// do not dial nodes with their tcp ports not set
+
+	// Ignore nodes with their TCP ports not set.
	if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
		if !enr.IsNotFound(err) {
			log.WithError(err).Debug("Could not retrieve tcp port")
		}
		return false
	}
+
	peerData, multiAddr, err := convertToAddrInfo(node)
	if err != nil {
		log.WithError(err).Debug("Could not convert to peer data")
		return false
	}
+
+	// Ignore bad nodes.
	if s.peers.IsBad(peerData.ID) {
		return false
	}
+
+	// Ignore nodes that are already active.
	if s.peers.IsActive(peerData.ID) {
		return false
	}
+
+	// Ignore nodes that are already connected.
	if s.host.Network().Connectedness(peerData.ID) == network.Connected {
		return false
	}
+
+	// Ignore nodes that are not ready to receive incoming connections.
	if !s.peers.IsReadyToDial(peerData.ID) {
		return false
	}
+
+	// Ignore nodes that don't match our fork digest.
	nodeENR := node.Record()
-	// Decide whether or not to connect to peer that does not
-	// match the proper fork ENR data with our local node.
	if s.genesisValidatorsRoot != nil {
		if err := s.compareForkENR(nodeENR); err != nil {
			log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
			return false
		}
	}
+
+	// Add peer to peer handler.
	s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)

	return true
}

@@ -363,7 +389,7 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
	return allAddrs, nil
}

-func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
+func ParseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
	discv5Nodes, _ = parseGenericAddrs(addrs)
	if len(discv5Nodes) == 0 {
		log.Warn("No bootstrap addresses supplied")
@@ -483,9 +509,9 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
	return ma.NewMultiaddr(address)
}

-func udpVersionFromIP(ipAddr net.IP) string {
+func udpVersionFromIP(ipAddr net.IP) int {
	if ipAddr.To4() != nil {
-		return "udp4"
+		return udp4
	}
-	return "udp6"
+	return udp6
}

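Returning the udp4/udp6 iota constants instead of strings makes misuse a compile-time error: a mistyped string case such as "upd4" would compile and silently never match, whereas an undeclared identifier will not build. A small sketch under that assumption (bindAddrFor is hypothetical; the real call sites switch inline, as in createListener above):

	func bindAddrFor(version int) (net.IP, error) {
		switch version {
		case udp4:
			return net.IPv4zero, nil // 0.0.0.0
		case udp6:
			return net.IPv6zero, nil // ::
		default:
			return nil, errors.Errorf("unknown udp version %d", version)
		}
	}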
@@ -42,10 +42,6 @@ import (

var discoveryWaitTime = 1 * time.Second

-func init() {
-	rand.Seed(time.Now().Unix())
-}
-
func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
	ip, err := prysmNetwork.ExternalIPv4()
	require.NoError(t, err, "Could not get ip")
@@ -103,8 +99,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
	for i := 1; i <= 5; i++ {
		port = 3000 + i
		cfg := &Config{
-			Discv5BootStrapAddr:  []string{bootNode.String()},
-			UDPPort:              uint(port),
+			Discv5BootStrapAddrs: []string{bootNode.String()},
+			UDPPort:              uint(port),
		}
		ipAddr, pkey := createAddrAndPrivKey(t)
		s = &Service{
@@ -134,6 +130,106 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
		}
	}

+func TestCreateLocalNode(t *testing.T) {
+	testCases := []struct {
+		name          string
+		cfg           *Config
+		expectedError bool
+	}{
+		{
+			name:          "valid config",
+			cfg:           nil,
+			expectedError: false,
+		},
+		{
+			name:          "invalid host address",
+			cfg:           &Config{HostAddress: "invalid"},
+			expectedError: true,
+		},
+		{
+			name:          "valid host address",
+			cfg:           &Config{HostAddress: "192.168.0.1"},
+			expectedError: false,
+		},
+		{
+			name:          "invalid host DNS",
+			cfg:           &Config{HostDNS: "invalid"},
+			expectedError: true,
+		},
+		{
+			name:          "valid host DNS",
+			cfg:           &Config{HostDNS: "www.google.com"},
+			expectedError: false,
+		},
+	}
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			// Define ports.
+			const (
+				udpPort = 2000
+				tcpPort = 3000
+			)
+
+			// Create a private key.
+			address, privKey := createAddrAndPrivKey(t)
+
+			// Create a service.
+			service := &Service{
+				genesisTime:           time.Now(),
+				genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
+				cfg:                   tt.cfg,
+			}
+
+			localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
+			if tt.expectedError {
+				require.NotNil(t, err)
+				return
+			}
+
+			require.NoError(t, err)
+
+			expectedAddress := address
+			if tt.cfg != nil && tt.cfg.HostAddress != "" {
+				expectedAddress = net.ParseIP(tt.cfg.HostAddress)
+			}
+
+			// Check IP.
+			// The IP is not checked in the DNS case, since it can resolve to different IPs.
+			if tt.cfg == nil || tt.cfg.HostDNS == "" {
+				ip := new(net.IP)
+				require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("ip", ip)))
+				require.Equal(t, true, ip.Equal(expectedAddress))
+				require.Equal(t, true, localNode.Node().IP().Equal(expectedAddress))
+			}
+
+			// Check UDP.
+			udp := new(uint16)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("udp", udp)))
+			require.Equal(t, udpPort, localNode.Node().UDP())
+
+			// Check TCP.
+			tcp := new(uint16)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("tcp", tcp)))
+			require.Equal(t, tcpPort, localNode.Node().TCP())
+
+			// Check fork is set.
+			fork := new([]byte)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
+			require.NotEmpty(t, *fork)
+
+			// Check att subnets.
+			attSubnets := new([]byte)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(attSubnetEnrKey, attSubnets)))
+			require.DeepSSZEqual(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, *attSubnets)
+
+			// Check sync committees subnets.
+			syncSubnets := new([]byte)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
+			require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
+		})
+	}
+}

func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
	addr := net.ParseIP("invalidIP")
	_, pkey := createAddrAndPrivKey(t)
@@ -310,12 +406,12 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
}

func TestCorrectUDPVersion(t *testing.T) {
-	assert.Equal(t, "udp4", udpVersionFromIP(net.IPv4zero), "incorrect network version")
-	assert.Equal(t, "udp6", udpVersionFromIP(net.IPv6zero), "incorrect network version")
-	assert.Equal(t, "udp4", udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
-	assert.Equal(t, "udp6", udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
+	assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
+	assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
+	assert.Equal(t, udp4, udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
+	assert.Equal(t, udp6, udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
	// v4 in v6
-	assert.Equal(t, "udp4", udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
+	assert.Equal(t, udp4, udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
}

// addPeer is a helper to add a peer with a given connection state.

@@ -46,9 +46,9 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {

	bootNode := bootListener.Self()
	cfg := &Config{
-		Discv5BootStrapAddr:  []string{bootNode.String()},
-		UDPPort:              uint(port),
-		StateNotifier:        &mock.MockStateNotifier{},
+		Discv5BootStrapAddrs: []string{bootNode.String()},
+		UDPPort:              uint(port),
+		StateNotifier:        &mock.MockStateNotifier{},
	}

	var listeners []*discover.UDPv5
@@ -132,8 +132,8 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {

	bootNode := bootListener.Self()
	cfg := &Config{
-		Discv5BootStrapAddr:  []string{bootNode.String()},
-		UDPPort:              uint(port),
+		Discv5BootStrapAddrs: []string{bootNode.String()},
+		UDPPort:              uint(port),
	}

	var listeners []*discover.UDPv5

@@ -20,7 +20,7 @@ self=%s
%d peers
%v
`,
-		s.cfg.BootstrapNodeAddr,
+		s.cfg.Discv5BootStrapAddrs,
		s.selfAddresses(),
		len(s.host.Network().Peers()),
		formatPeers(s.host), // Must be last. Writes one entry per row.

@@ -4,6 +4,7 @@ import (
	"crypto/ecdsa"
	"fmt"
	"net"
	"time"

	"github.com/libp2p/go-libp2p"
	mplex "github.com/libp2p/go-libp2p-mplex"
@@ -11,6 +12,7 @@ import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	gomplex "github.com/libp2p/go-mplex"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/features"
@@ -31,29 +33,31 @@ func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
}

// buildOptions for the libp2p host.
-func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
+func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
	cfg := s.cfg
	listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
	if err != nil {
-		log.WithError(err).Fatal("Failed to p2p listen")
+		return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
	}
	if cfg.LocalIP != "" {
		if net.ParseIP(cfg.LocalIP) == nil {
-			log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
+			return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
		}
+
		listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
		if err != nil {
-			log.WithError(err).Fatal("Failed to p2p listen")
+			return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
		}
	}
	ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
	if err != nil {
-		log.WithError(err).Fatal("Failed to retrieve private key")
+		return nil, errors.Wrap(err, "cannot convert private key to interface private key. (Private key not displayed in logs for security reasons)")
	}
	id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
	if err != nil {
-		log.WithError(err).Fatal("Failed to retrieve peer id")
+		return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
	}

	log.Infof("Running node with peer id of %s ", id.String())

	options := []libp2p.Option{
@@ -64,10 +68,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.DefaultMuxers,
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
+		libp2p.Security(noise.ID, noise.New),
+		libp2p.Ping(false), // Disable Ping Service.
	}

-	options = append(options, libp2p.Security(noise.ID, noise.New))
-
	if cfg.EnableUPnP {
		options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
	}
@@ -99,12 +103,11 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
			return addrs
		}))
	}
-	// Disable Ping Service.
-	options = append(options, libp2p.Ping(false))

	if features.Get().DisableResourceManager {
		options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
	}
-	return options
+	return options, nil
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
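The theme of this hunk is moving the exit decision out of the library: every log.Fatal inside buildOptions becomes a wrapped error returned to the caller, so the process-exit choice belongs to NewService (further down in this diff), which simply propagates it:

	opts, err := s.buildOptions(ipAddr, s.privKey)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to build p2p options")
	}

One wrinkle worth noting: in the LocalIP branch, err is necessarily nil when net.ParseIP fails (any earlier error was already returned), and pkg/errors' Wrapf returns nil for a nil error, so that particular check appears to yield (nil, nil); errors.Errorf was likely intended there.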
@@ -134,3 +137,8 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
		return cfg.Apply(libp2p.Identity(ifaceKey))
	}
}
+
+// Configures stream timeouts on mplex.
+func configureMplex() {
+	gomplex.ResetStreamTimeout = 5 * time.Second
+}

@@ -119,7 +119,9 @@ func TestDefaultMultiplexers(t *testing.T) {
	svc.privKey, err = privKey(svc.cfg)
	assert.NoError(t, err)
	ipAddr := network.IPAddr()
-	opts := svc.buildOptions(ipAddr, svc.privKey)
+	opts, err := svc.buildOptions(ipAddr, svc.privKey)
+	assert.NoError(t, err)
+
	err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
	assert.NoError(t, err)

@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
	pms := pubsubGossipParam()
	assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
	assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
-	assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
+	assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
}

func TestFanoutParameters(t *testing.T) {

@@ -107,5 +107,4 @@ func TestStore_TrustedPeers(t *testing.T) {
	assert.Equal(t, false, store.IsTrustedPeer(pid1))
	assert.Equal(t, false, store.IsTrustedPeer(pid2))
	assert.Equal(t, false, store.IsTrustedPeer(pid3))
-
}

@@ -56,12 +56,12 @@ func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConf
func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.score(pid)
+	return s.scoreNoLock(pid)
}

-// score is a lock-free version of Score.
-func (s *BadResponsesScorer) score(pid peer.ID) float64 {
-	if s.isBadPeer(pid) {
+// scoreNoLock is a lock-free version of Score.
+func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
+	if s.isBadPeerNoLock(pid) {
		return BadPeerScore
	}
	score := float64(0)
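The score-to-scoreNoLock renames here (and in every scorer below) encode the locking discipline in the name: the exported method acquires the read lock and delegates; the NoLock variant assumes the caller already holds the lock. The general shape of the pattern, as a minimal standalone sketch:

	type counter struct {
		mu sync.RWMutex
		n  int
	}

	// Value is the public, locking entry point.
	func (c *counter) Value() int {
		c.mu.RLock()
		defer c.mu.RUnlock()
		return c.valueNoLock()
	}

	// valueNoLock assumes c.mu is already held (read or write).
	func (c *counter) valueNoLock() int {
		return c.n
	}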
@@ -87,11 +87,11 @@ func (s *BadResponsesScorer) Params() *BadResponsesScorerConfig {
func (s *BadResponsesScorer) Count(pid peer.ID) (int, error) {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.count(pid)
+	return s.countNoLock(pid)
}

-// count is a lock-free version of Count.
-func (s *BadResponsesScorer) count(pid peer.ID) (int, error) {
+// countNoLock is a lock-free version of Count.
+func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.BadResponses, nil
	}
@@ -119,11 +119,11 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeer(pid)
+	return s.isBadPeerNoLock(pid)
}

-// isBadPeer is lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
+// isBadPeerNoLock is lock-free version of IsBadPeer.
+func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.BadResponses >= s.config.Threshold
	}
@@ -137,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeer(pid) {
+		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}

@@ -15,6 +15,8 @@ import (
)

func TestScorers_BadResponses_Score(t *testing.T) {
+	const pid = "peer1"
+
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -28,15 +30,23 @@ func TestScorers_BadResponses_Score(t *testing.T) {
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()

-	assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score for unregistered peer")
-	scorer.Increment("peer1")
-	assert.Equal(t, -2.5, scorer.Score("peer1"))
-	scorer.Increment("peer1")
-	assert.Equal(t, float64(-5), scorer.Score("peer1"))
-	scorer.Increment("peer1")
-	scorer.Increment("peer1")
-	assert.Equal(t, -100.0, scorer.Score("peer1"))
-	assert.Equal(t, true, scorer.IsBadPeer("peer1"))
+	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
+
+	scorer.Increment(pid)
+	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, -2.5, scorer.Score(pid))
+
+	scorer.Increment(pid)
+	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, float64(-5), scorer.Score(pid))
+
+	scorer.Increment(pid)
+	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, float64(-7.5), scorer.Score(pid))
+
+	scorer.Increment(pid)
+	assert.Equal(t, true, scorer.IsBadPeer(pid))
+	assert.Equal(t, -100.0, scorer.Score(pid))
}

func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {

@@ -98,11 +98,11 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
func (s *BlockProviderScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.score(pid)
+	return s.scoreNoLock(pid)
}

-// score is a lock-free version of Score.
-func (s *BlockProviderScorer) score(pid peer.ID) float64 {
+// scoreNoLock is a lock-free version of Score.
+func (s *BlockProviderScorer) scoreNoLock(pid peer.ID) float64 {
	score := float64(0)
	peerData, ok := s.store.PeerData(pid)
	// Boost score of new peers or peers that haven't been accessed for too long.
@@ -126,7 +126,7 @@ func (s *BlockProviderScorer) Params() *BlockProviderScorerConfig {
func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64) {
	s.store.Lock()
	defer s.store.Unlock()
-	defer s.touch(pid)
+	defer s.touchNoLock(pid)

	if cnt <= 0 {
		return
@@ -145,11 +145,11 @@ func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64)
func (s *BlockProviderScorer) Touch(pid peer.ID, t ...time.Time) {
	s.store.Lock()
	defer s.store.Unlock()
-	s.touch(pid, t...)
+	s.touchNoLock(pid, t...)
}

-// touch is a lock-free version of Touch.
-func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
+// touchNoLock is a lock-free version of Touch.
+func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
	peerData := s.store.PeerDataGetOrCreate(pid)
	if len(t) == 1 {
		peerData.BlockProviderUpdated = t[0]
@@ -162,11 +162,11 @@ func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
func (s *BlockProviderScorer) ProcessedBlocks(pid peer.ID) uint64 {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.processedBlocks(pid)
+	return s.processedBlocksNoLock(pid)
}

-// processedBlocks is a lock-free version of ProcessedBlocks.
-func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
+// processedBlocksNoLock is a lock-free version of ProcessedBlocks.
+func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.ProcessedBlocks
	}
@@ -177,13 +177,13 @@ func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
-func (_ *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
+func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
	return false
}

// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
-func (_ *BlockProviderScorer) BadPeers() []peer.ID {
+func (*BlockProviderScorer) BadPeers() []peer.ID {
	return []peer.ID{}
}

@@ -277,9 +277,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
	peers := make([]peer.ID, len(pids))
	for i, pid := range pids {
		if scoreFn != nil {
-			scores[pid] = scoreFn(pid, s.score(pid))
+			scores[pid] = scoreFn(pid, s.scoreNoLock(pid))
		} else {
-			scores[pid] = s.score(pid)
+			scores[pid] = s.scoreNoLock(pid)
		}
		peers[i] = pid
	}
@@ -293,9 +293,9 @@ func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
	if !features.Get().EnablePeerScorer {
		return "disabled"
	}
-	score := s.score(pid)
+	score := s.scoreNoLock(pid)
	return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
-		(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
+		(score/s.MaxScore())*100, score, s.processedBlocksNoLock(pid), s.config.ProcessedBlocksCap)
}

// MaxScore exposes maximum score attainable by peers.

@@ -38,11 +38,11 @@ func newGossipScorer(store *peerdata.Store, config *GossipScorerConfig) *GossipS
func (s *GossipScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.score(pid)
+	return s.scoreNoLock(pid)
}

-// score is a lock-free version of Score.
-func (s *GossipScorer) score(pid peer.ID) float64 {
+// scoreNoLock is a lock-free version of Score.
+func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return 0
@@ -54,11 +54,11 @@ func (s *GossipScorer) score(pid peer.ID) float64 {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeer(pid)
+	return s.isBadPeerNoLock(pid)
}

-// isBadPeer is lock-free version of IsBadPeer.
-func (s *GossipScorer) isBadPeer(pid peer.ID) bool {
+// isBadPeerNoLock is lock-free version of IsBadPeer.
+func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return false
@@ -73,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeer(pid) {
+		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}
@@ -98,11 +98,11 @@ func (s *GossipScorer) SetGossipData(pid peer.ID, gScore float64,
func (s *GossipScorer) GossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.gossipData(pid)
+	return s.gossipDataNoLock(pid)
}

-// gossipData lock-free version of GossipData.
-func (s *GossipScorer) gossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
+// gossipDataNoLock lock-free version of GossipData.
+func (s *GossipScorer) gossipDataNoLock(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.GossipScore, peerData.BehaviourPenalty, peerData.TopicScores, nil
	}

@@ -41,12 +41,12 @@ func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig)
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.score(pid)
+	return s.scoreNoLock(pid)
}

-// score is a lock-free version of Score.
-func (s *PeerStatusScorer) score(pid peer.ID) float64 {
-	if s.isBadPeer(pid) {
+// scoreNoLock is a lock-free version of Score.
+func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
+	if s.isBadPeerNoLock(pid) {
		return BadPeerScore
	}
	score := float64(0)
@@ -70,11 +70,11 @@ func (s *PeerStatusScorer) score(pid peer.ID) float64 {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeer(pid)
+	return s.isBadPeerNoLock(pid)
}

-// isBadPeer is lock-free version of IsBadPeer.
-func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
+// isBadPeerNoLock is lock-free version of IsBadPeer.
+func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return false
@@ -100,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeer(pid) {
+		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}
@@ -129,11 +129,11 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.peerStatus(pid)
+	return s.peerStatusNoLock(pid)
}

-// peerStatus lock-free version of PeerStatus.
-func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
+// peerStatusNoLock lock-free version of PeerStatus.
+func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		if peerData.ChainState == nil {
			return nil, peerdata.ErrNoPeerStatus

@@ -116,10 +116,10 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
	if _, ok := s.store.PeerData(pid); !ok {
		return 0
	}
-	score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
-	score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
-	score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
-	score += s.scorers.gossipScorer.score(pid) * s.scorerWeight(s.scorers.gossipScorer)
+	score += s.scorers.badResponsesScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
+	score += s.scorers.blockProviderScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
+	score += s.scorers.peerStatusScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
+	score += s.scorers.gossipScorer.scoreNoLock(pid) * s.scorerWeight(s.scorers.gossipScorer)
	return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}

@@ -132,14 +132,14 @@ func (s *Service) IsBadPeer(pid peer.ID) bool {

// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
-	if s.scorers.badResponsesScorer.isBadPeer(pid) {
+	if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
		return true
	}
-	if s.scorers.peerStatusScorer.isBadPeer(pid) {
+	if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
		return true
	}
	if features.Get().EnablePeerScorer {
-		if s.scorers.gossipScorer.isBadPeer(pid) {
+		if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
			return true
		}
	}

@@ -25,7 +25,7 @@ const (
	// gossip parameters
	gossipSubMcacheLen    = 6   // number of windows to retain full messages in cache for `IWANT` responses
	gossipSubMcacheGossip = 3   // number of windows to gossip about
-	gossipSubSeenTTL      = 550 // number of heartbeat intervals to retain message IDs
+	gossipSubSeenTTL      = 768 // number of seconds to retain message IDs (2 epochs)

	// fanout ttl
	gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nano seconds
@@ -165,7 +165,8 @@ func pubsubGossipParam() pubsub.GossipSubParams {
// to configure our message id time-cache rather than instantiating
// it with a router instance.
func setPubSubParameters() {
-	pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
+	seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
+	pubsub.TimeCacheDuration = seenTtl
}

// convert from libp2p's internal schema to a compatible prysm protobuf format.

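The two changes are consistent by construction: with mainnet parameters (32 slots per epoch, 12 seconds per slot), 2 epochs × 32 slots × 12 s = 768 s, which is exactly the updated gossipSubSeenTTL constant, now denominated in seconds rather than heartbeat intervals. A quick check of the arithmetic setPubSubParameters performs (plain ints here for illustration; the real code reads both values from params.BeaconConfig()):

	slotsPerEpoch := 32
	secondsPerSlot := 12
	seenTtl := 2 * time.Second * time.Duration(slotsPerEpoch*secondsPerSlot)
	fmt.Println(seenTtl) // 12m48s, i.e. 768 seconds, or two epochs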
@@ -82,44 +82,50 @@ type Service struct {
// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
	var err error
	ctx, cancel := context.WithCancel(ctx)
	_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().

+	cfg = validateConfig(cfg)
+	privKey, err := privKey(cfg)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to generate p2p private key")
+	}
+
+	metaData, err := metaDataFromConfig(cfg)
+	if err != nil {
+		log.WithError(err).Error("Failed to create peer metadata")
+		return nil, err
+	}
+
+	addrFilter, err := configureFilter(cfg)
+	if err != nil {
+		log.WithError(err).Error("Failed to create address filter")
+		return nil, err
+	}
+
+	ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
+
	s := &Service{
		ctx:          ctx,
		cancel:       cancel,
		cfg:          cfg,
+		addrFilter:   addrFilter,
+		ipLimiter:    ipLimiter,
+		privKey:      privKey,
+		metaData:     metaData,
		isPreGenesis: true,
		joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
		subnetsLock:  make(map[uint64]*sync.RWMutex),
	}

-	s.cfg = validateConfig(s.cfg)
-
-	dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr)
-
-	cfg.Discv5BootStrapAddr = dv5Nodes
-
	ipAddr := prysmnetwork.IPAddr()
-	s.privKey, err = privKey(s.cfg)
-	if err != nil {
-		log.WithError(err).Error("Failed to generate p2p private key")
-		return nil, err
-	}
-	s.metaData, err = metaDataFromConfig(s.cfg)
-	if err != nil {
-		log.WithError(err).Error("Failed to create peer metadata")
-		return nil, err
-	}
-	s.addrFilter, err = configureFilter(s.cfg)
-	if err != nil {
-		log.WithError(err).Error("Failed to create address filter")
-		return nil, err
-	}
-	s.ipLimiter = leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)

-	opts := s.buildOptions(ipAddr, s.privKey)
+	opts, err := s.buildOptions(ipAddr, s.privKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to build p2p options")
+	}
+	// Sets mplex timeouts
+	configureMplex()
	h, err := libp2p.New(opts...)
	if err != nil {
		log.WithError(err).Error("Failed to create p2p host")
@@ -285,7 +291,7 @@ func (s *Service) Started() bool {
}

// Encoding returns the configured networking encoding.
-func (_ *Service) Encoding() encoder.NetworkEncoding {
+func (*Service) Encoding() encoder.NetworkEncoding {
	return &encoder.SszNetworkEncoder{}
}

@@ -451,8 +457,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
}

func (s *Service) connectToBootnodes() error {
-	nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddr))
-	for _, addr := range s.cfg.Discv5BootStrapAddr {
+	nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
+	for _, addr := range s.cfg.Discv5BootStrapAddrs {
		bootNode, err := enode.Parse(enode.ValidSchemes, addr)
		if err != nil {
			return err

@@ -213,10 +213,9 @@ func TestListenForNewNodes(t *testing.T) {
	// setup other nodes.
	cs := startup.NewClockSynchronizer()
	cfg = &Config{
-		BootstrapNodeAddr:    []string{bootNode.String()},
-		Discv5BootStrapAddr:  []string{bootNode.String()},
-		MaxPeers:             30,
-		ClockWaiter:          cs,
+		Discv5BootStrapAddrs: []string{bootNode.String()},
+		MaxPeers:             30,
+		ClockWaiter:          cs,
	}
	for i := 1; i <= 5; i++ {
		h, pkey, ipAddr := createHost(t, port+i)

@@ -46,9 +46,13 @@ const syncLockerVal = 100
const blobSubnetLockerVal = 110

// FindPeersWithSubnet performs a network search for peers
-// subscribed to a particular subnet. Then we try to connect
-// with those peers. This method will block until the required amount of
-// peers are found, the method only exits in the event of context timeouts.
+// subscribed to a particular subnet. Then it tries to connect
+// with those peers. This method will block until either:
+// - the required amount of peers are found, or
+// - the context is terminated.
+// On some edge cases, this method may hang indefinitely while peers
+// are actually found. In such a case, the user should cancel the context
+// and re-run the method again.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
	index uint64, threshold int) (bool, error) {
	ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
@@ -73,9 +77,9 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
		return false, errors.New("no subnet exists for provided topic")
	}

-	currNum := len(s.pubsub.ListPeers(topic))
	wg := new(sync.WaitGroup)
	for {
+		currNum := len(s.pubsub.ListPeers(topic))
		if currNum >= threshold {
			break
		}
@@ -99,7 +103,6 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
		}
		// Wait for all dials to be completed.
		wg.Wait()
-		currNum = len(s.pubsub.ListPeers(topic))
	}
	return true, nil
}
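Moving the currNum recomputation inside the loop is the substantive fix here: the peer count is re-read from pubsub at the top of every pass, so peers that arrived through the batched dials are observed on the next iteration instead of through the single post-Wait refresh the old code relied on. The resulting loop shape, condensed:

	for {
		currNum := len(s.pubsub.ListPeers(topic))
		if currNum >= threshold {
			break // enough peers subscribed to the topic
		}
		// ... discover candidate nodes and dial them on wg ...
		wg.Wait() // all dials finished before re-counting
	}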
@@ -110,18 +113,13 @@ func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bo
		if !s.filterPeer(node) {
			return false
		}

		subnets, err := attSubnets(node.Record())
		if err != nil {
			return false
		}
-		indExists := false
-		for _, comIdx := range subnets {
-			if comIdx == index {
-				indExists = true
-				break
-			}
-		}
-		return indExists
+
+		return subnets[index]
	}
}

@@ -205,8 +203,10 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
//
// return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
-	subs := []uint64{}
-	for i := uint64(0); i < params.BeaconConfig().SubnetsPerNode; i++ {
+	subnetsPerNode := params.BeaconConfig().SubnetsPerNode
+	subs := make([]uint64, 0, subnetsPerNode)
+
+	for i := uint64(0); i < subnetsPerNode; i++ {
		sub, err := computeSubscribedSubnet(nodeID, epoch, i)
		if err != nil {
			return nil, err
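Hoisting params.BeaconConfig().SubnetsPerNode into a local and sizing the slice with make avoids both the repeated config lookup per iteration and the reallocations that growing an empty slice by append would incur. The idiom, generically:

	n := uint64(8) // e.g. params.BeaconConfig().SubnetsPerNode
	out := make([]uint64, 0, n)
	for i := uint64(0); i < n; i++ {
		out = append(out, i) // stays within the preallocated capacity
	}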
@@ -281,19 +281,20 @@ func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {

// Reads the attestation subnets entry from a node's ENR and determines
// the committee indices of the attestation subnets the node is subscribed to.
-func attSubnets(record *enr.Record) ([]uint64, error) {
+func attSubnets(record *enr.Record) (map[uint64]bool, error) {
	bitV, err := attBitvector(record)
	if err != nil {
		return nil, err
	}
+	committeeIdxs := make(map[uint64]bool)
	// lint:ignore uintcast -- subnet count can be safely cast to int.
	if len(bitV) != byteCount(int(attestationSubnetCount)) {
-		return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
+		return committeeIdxs, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
	}
-	var committeeIdxs []uint64
+
	for i := uint64(0); i < attestationSubnetCount; i++ {
		if bitV.BitAt(i) {
-			committeeIdxs = append(committeeIdxs, i)
+			committeeIdxs[i] = true
		}
	}
	return committeeIdxs, nil
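Returning map[uint64]bool instead of a slice turns the caller's linear scan into a single lookup, which is exactly what the simplified filterPeerForAttSubnet above now does with return subnets[index]. The lookup is also safe for absent keys, since a Go map yields the value type's zero value:

	subnets := map[uint64]bool{1: true, 3: true}
	fmt.Println(subnets[1]) // true
	fmt.Println(subnets[2]) // false: missing keys yield the zero value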
@@ -3,49 +3,46 @@ package p2p

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
	pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

-func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
-	params.SetupTestConfigCleanup(t)
-	// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
-	t.Skip("This test is now failing after PR 7885 due to false positive")
-	gFlags := new(flags.GlobalFlags)
-	gFlags.MinimumPeersPerSubnet = 4
-	flags.Init(gFlags)
-	// Reset config.
-	defer flags.Init(new(flags.GlobalFlags))
-	port := 2000
-	ipAddr, pkey := createAddrAndPrivKey(t)
-	genesisTime := time.Now()
-	genesisValidatorsRoot := make([]byte, 32)
-	s := &Service{
-		cfg:                   &Config{UDPPort: uint(port)},
-		genesisTime:           genesisTime,
-		genesisValidatorsRoot: genesisValidatorsRoot,
-	}
-	bootListener, err := s.createListener(ipAddr, pkey)
-	require.NoError(t, err)
-	defer bootListener.Close()
+func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
+	// Topology of this test:
+	//
+	//
+	// Node 1 (subscribed to subnet 1) --\
+	//                                   |
+	// Node 2 (subscribed to subnet 2) --+--> BootNode (not subscribed to any subnet) <------- Node 0 (not subscribed to any subnet)
+	//                                   |
+	// Node 3 (subscribed to subnet 3) --/
+	//
+	// The purpose of this test is to ensure that the "Node 0" (connected only to the boot node) is able to
+	// find and connect to a node already subscribed to a specific subnet.
+	// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3
+
+	// Define the genesis validators root, to ensure everybody is on the same network.
+	const genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
+	genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
+	require.NoError(t, err)
+
+	// Create a context.
+	ctx := context.Background()

-	bootNode := bootListener.Self()
	// Use shorter period for testing.
	currentPeriod := pollingPeriod
	pollingPeriod = 1 * time.Second
|
||||
pollingPeriod = currentPeriod
|
||||
}()
|
||||
|
||||
var listeners []*discover.UDPv5
|
||||
// Create flags.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
|
||||
params.BeaconNetworkConfig().MinimumPeersInSubnetSearch = 1
|
||||
|
||||
// Reset config.
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
// First, generate a bootstrap node.
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
|
||||
bootNodeService := &Service{
|
||||
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
}
|
||||
|
||||
bootNodeForkDigest, err := bootNodeService.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootListener, err := bootNodeService.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
defer bootListener.Close()
|
||||
|
||||
bootNodeENR := bootListener.Self().String()
|
||||
|
||||
// Create 3 nodes, each subscribed to a different subnet.
|
||||
// Each node is connected to the boostrap node.
|
||||
services := make([]*Service, 0, 3)
|
||||
|
||||
for i := 1; i <= 3; i++ {
|
||||
port = 3000 + i
|
||||
cfg := &Config{
|
||||
BootstrapNodeAddr: []string{bootNode.String()},
|
||||
Discv5BootStrapAddr: []string{bootNode.String()},
|
||||
MaxPeers: 30,
|
||||
UDPPort: uint(port),
|
||||
}
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s = &Service{
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
subnet := uint64(i)
|
||||
service, err := NewService(ctx, &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: uint(2000 + i),
|
||||
UDPPort: uint(3000 + i),
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
|
||||
nodeForkDigest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, nodeForkDigest == bootNodeForkDigest, "fork digest of the node doesn't match the boot node")
|
||||
|
||||
// Start the service.
|
||||
service.Start()
|
||||
|
||||
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(uint64(i), true)
|
||||
|
||||
bitV.SetBitAt(subnet, true)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
|
||||
listener.LocalNode().Set(entry)
|
||||
listeners = append(listeners, listener)
|
||||
service.dv5Listener.LocalNode().Set(entry)
|
||||
|
||||
// Join and subscribe to the subnet, needed by libp2p.
|
||||
topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = topic.Subscribe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Memoize the service.
|
||||
services = append(services, service)
|
||||
}
|
||||
|
||||
// Stop the services.
|
||||
defer func() {
|
||||
// Close down all peers.
|
||||
for _, listener := range listeners {
|
||||
listener.Close()
|
||||
for _, service := range services {
|
||||
err := service.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Make one service on port 4001.
|
||||
port = 4001
|
||||
gs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
BootstrapNodeAddr: []string{bootNode.String()},
|
||||
Discv5BootStrapAddr: []string{bootNode.String()},
|
||||
MaxPeers: 30,
|
||||
UDPPort: uint(port),
|
||||
ClockWaiter: gs,
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: 2010,
|
||||
UDPPort: 3010,
|
||||
}
|
||||
s, err = NewService(context.Background(), cfg)
|
||||
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
s.Start()
|
||||
<-exitRoutine
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
|
||||
service.Start()
|
||||
defer func() {
|
||||
err := service.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
var vr [32]byte
|
||||
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
|
||||
// Wait for the nodes to have their local routing tables to be populated with the other nodes
|
||||
time.Sleep(6 * discoveryWaitTime)
|
||||
// Look up 3 different subnets.
|
||||
exists := make([]bool, 0, 3)
|
||||
for i := 1; i <= 3; i++ {
|
||||
subnet := uint64(i)
|
||||
topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
|
||||
|
||||
exist := false
|
||||
|
||||
// This for loop is used to ensure we don't get stuck in `FindPeersWithSubnet`.
|
||||
// Read the documentation of `FindPeersWithSubnet` for more details.
|
||||
for j := 0; j < 3; j++ {
|
||||
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
exist, err = service.FindPeersWithSubnet(ctxWithTimeOut, topic, subnet, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
if exist {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
exists = append(exists, exist)
|
||||
|
||||
// look up 3 different subnets
|
||||
ctx := context.Background()
|
||||
exists, err := s.FindPeersWithSubnet(ctx, "", 1, flags.Get().MinimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
exists2, err := s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
exists3, err := s.FindPeersWithSubnet(ctx, "", 3, flags.Get().MinimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
if !exists || !exists2 || !exists3 {
|
||||
t.Fatal("Peer with subnet doesn't exist")
|
||||
}
|
||||
|
||||
// Update ENR of a peer.
|
||||
testService := &Service{
|
||||
dv5Listener: listeners[0],
|
||||
metaData: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
|
||||
Attnets: bitfield.NewBitvector64(),
|
||||
}),
|
||||
// Check if all peers are found.
|
||||
for _, exist := range exists {
|
||||
require.Equal(t, true, exist, "Peer with subnet doesn't exist")
|
||||
}
|
||||
cache.SubnetIDs.AddAttesterSubnetID(0, 10)
|
||||
testService.RefreshENR()
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
exists, err = s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, true, exists, "Peer with subnet doesn't exist")
|
||||
assert.NoError(t, s.Stop())
|
||||
exitRoutine <- true
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(t *testing.T) *enr.Record
record func(localNode *enode.LocalNode) *enr.Record
want []uint64
wantErr bool
errContains string
}{
{
name: "valid record",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
localNode = initializeAttSubnets(localNode)
return localNode.Node().Record()
},
@@ -168,14 +202,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too small subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
localNode.Set(entry)
return localNode.Node().Record()
@@ -186,14 +213,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "half sized subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
localNode.Set(entry)
return localNode.Node().Record()
@@ -204,14 +224,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too large subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+1))
localNode.Set(entry)
return localNode.Node().Record()
@@ -222,14 +235,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "very large subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+100))
localNode.Set(entry)
return localNode.Node().Record()
@@ -240,14 +246,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "single subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(0, true)
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
@@ -259,17 +258,10 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "multiple subnets",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
// skip 2 subnets
// Keep only odd subnets.
if (i+1)%2 == 0 {
continue
}
@@ -287,14 +279,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "all subnets",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
bitV.SetBitAt(i, true)
@@ -311,16 +296,35 @@ func Test_AttSubnets(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := attSubnets(tt.record(t))
db, err := enode.OpenDB("")
assert.NoError(t, err)

priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)

convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)

localNode := enode.NewLocalNode(db, convertedKey)
record := tt.record(localNode)

got, err := attSubnets(record)
if (err != nil) != tt.wantErr {
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
return
}

if tt.wantErr {
assert.ErrorContains(t, tt.errContains, err)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)

want := make(map[uint64]bool, len(tt.want))
for _, subnet := range tt.want {
want[subnet] = true
}

if !reflect.DeepEqual(got, want) {
t.Errorf("syncSubnets() got = %v, want %v", got, want)
}
})
}

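The refactor above trades eight copies of the enode database and key boilerplate for one record func(localNode *enode.LocalNode) hook and a single setup block inside the loop. A minimal sketch of the same shared-fixture, table-driven pattern, using hypothetical names rather than Prysm code:

package example

import "testing"

// fixture stands in for the expensive shared setup (the enode DB and
// secp256k1 key in the test above), now built once per case in the loop.
type fixture struct{ id int }

func TestSharedFixture(t *testing.T) {
	tests := []struct {
		name string
		// Cases only customize the fixture; they no longer construct it.
		mutate func(f *fixture) int
		want   int
	}{
		{name: "identity", mutate: func(f *fixture) int { return f.id }, want: 7},
		{name: "double", mutate: func(f *fixture) int { return 2 * f.id }, want: 14},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := &fixture{id: 7} // shared construction lives here
			if got := tt.mutate(f); got != tt.want {
				t.Errorf("got %d, want %d", got, tt.want)
			}
		})
	}
}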
@@ -54,38 +54,45 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return privKeyFromFile(cfg.PrivateKey)
}

// Default keys have the next highest precedence, if they exist.
_, err := os.Stat(defaultKeyPath)
defaultKeysExist := !os.IsNotExist(err)
if err != nil && defaultKeysExist {
return nil, err
}
// Default keys have the next highest precedence, if they exist.

if defaultKeysExist {
return privKeyFromFile(defaultKeyPath)
}

// There are no keys on the filesystem, so we need to generate one.
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
if err != nil {
return nil, err
}
// If the StaticPeerID flag is set, save the generated key as the default
// key, so that it will be used by default on the next node start.
if cfg.StaticPeerID {
rawbytes, err := priv.Raw()
if err != nil {
return nil, err
}
dst := make([]byte, hex.EncodedLen(len(rawbytes)))
hex.Encode(dst, rawbytes)
if err := file.WriteFile(defaultKeyPath, dst); err != nil {
return nil, err
}
log.Infof("Wrote network key to file")
// Read the key from the defaultKeyPath file just written
// for the strongest guarantee that the next start will be the same as this one.
return privKeyFromFile(defaultKeyPath)

// If the StaticPeerID flag is not set, return the private key.
if !cfg.StaticPeerID {
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
}
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)

// Save the generated key as the default key, so that it will be used by
// default on the next node start.
rawbytes, err := priv.Raw()
if err != nil {
return nil, err
}

dst := make([]byte, hex.EncodedLen(len(rawbytes)))
hex.Encode(dst, rawbytes)
if err := file.WriteFile(defaultKeyPath, dst); err != nil {
return nil, err
}

log.Info("Wrote network key to file")
// Read the key from the defaultKeyPath file just written
// for the strongest guarantee that the next start will be the same as this one.
return privKeyFromFile(defaultKeyPath)
}

// Retrieves a p2p networking private key from a file path.

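The reordered privKey flow above resolves the networking key in three steps: an explicit key file wins, then an existing default key file, and only then a freshly generated key, which is persisted and re-read when StaticPeerID is set. A compressed sketch of that precedence, with placeholder load/generate helpers standing in for Prysm's file and key utilities:

package example

import (
	"errors"
	"os"
)

// resolveKey mirrors the precedence in privKey above: explicit path, then an
// existing default key file, then a newly generated key. persist controls
// whether the generated key is written back (the StaticPeerID behavior).
func resolveKey(explicitPath, defaultPath string, persist bool,
	load func(string) ([]byte, error), generate func() []byte) ([]byte, error) {
	if explicitPath != "" {
		return load(explicitPath)
	}
	if _, err := os.Stat(defaultPath); err == nil {
		return load(defaultPath)
	} else if !errors.Is(err, os.ErrNotExist) {
		return nil, err // a real stat failure, not just a missing file
	}
	key := generate()
	if !persist {
		return key, nil
	}
	if err := os.WriteFile(defaultPath, key, 0o600); err != nil {
		return nil, err
	}
	// Re-read the file just written, for the strongest guarantee that the
	// next start sees exactly the same key (as the original comment notes).
	return load(defaultPath)
}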
@@ -25,7 +25,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

@@ -8,6 +8,7 @@ import (
time2 "time"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
@@ -23,7 +24,6 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

@@ -124,86 +124,93 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
// stalling while waiting for the first response chunk.
// After that we send a keepalive dummy message every SECONDS_PER_SLOT
// to prevent anyone (e.g. proxy servers) from closing connections.
sendKeepalive(w, flusher)
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
keepaliveTicker := time2.NewTicker(time2.Duration(params.BeaconConfig().SecondsPerSlot) * time2.Second)

for {
select {
case event := <-opsChan:
handleBlockOperationEvents(w, flusher, topicsMap, event)
if err := handleBlockOperationEvents(w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case event := <-stateChan:
s.handleStateEvents(ctx, w, flusher, topicsMap, event)
if err := s.handleStateEvents(ctx, w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case <-keepaliveTicker.C:
sendKeepalive(w, flusher)
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case <-ctx.Done():
return
}
}
}

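With write, send, and sendKeepalive now returning errors, the StreamEvents loop above can tear the stream down as soon as a client stops reading instead of silently spinning. A stripped-down sketch of the same select-loop shape, independent of Prysm's feed types:

package example

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// streamLoop sends one keepalive comment per interval and exits on the first
// failed write, mirroring the StreamEvents control flow above.
func streamLoop(ctx context.Context, w http.ResponseWriter, events <-chan string, interval time.Duration) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case ev := <-events:
			if _, err := fmt.Fprintf(w, "event: message\ndata: %s\n\n", ev); err != nil {
				return // client went away; stop streaming
			}
			flusher.Flush()
		case <-ticker.C:
			// ":" is an SSE comment line, ignored by clients but enough to
			// keep proxies from closing an idle connection.
			if _, err := fmt.Fprint(w, ":\n\n"); err != nil {
				return
			}
			flusher.Flush()
		case <-ctx.Done():
			return
		}
	}
}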
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
switch event.Type {
case operation.AggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return
return nil
}
attData, ok := event.Data.(*operation.AggregatedAttReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
}
att := structs.AttFromConsensus(attData.Attestation.Aggregate)
send(w, flusher, AttestationTopic, att)
return send(w, flusher, AttestationTopic, att)
case operation.UnaggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return
return nil
}
attData, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
}
att := structs.AttFromConsensus(attData.Attestation)
send(w, flusher, AttestationTopic, att)
return send(w, flusher, AttestationTopic, att)
case operation.ExitReceived:
if _, ok := requestedTopics[VoluntaryExitTopic]; !ok {
return
return nil
}
exitData, ok := event.Data.(*operation.ExitReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
}
exit := structs.SignedExitFromConsensus(exitData.Exit)
send(w, flusher, VoluntaryExitTopic, exit)
return send(w, flusher, VoluntaryExitTopic, exit)
case operation.SyncCommitteeContributionReceived:
if _, ok := requestedTopics[SyncCommitteeContributionTopic]; !ok {
return
return nil
}
contributionData, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
}
contribution := structs.SignedContributionAndProofFromConsensus(contributionData.Contribution)
send(w, flusher, SyncCommitteeContributionTopic, contribution)
return send(w, flusher, SyncCommitteeContributionTopic, contribution)
case operation.BLSToExecutionChangeReceived:
if _, ok := requestedTopics[BLSToExecutionChangeTopic]; !ok {
return
return nil
}
changeData, ok := event.Data.(*operation.BLSToExecutionChangeReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
}
send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
return send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
case operation.BlobSidecarReceived:
if _, ok := requestedTopics[BlobSidecarTopic]; !ok {
return
return nil
}
blobData, ok := event.Data.(*operation.BlobSidecarReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
}
versionedHash := blockchain.ConvertKzgCommitmentToVersionedHash(blobData.Blob.KzgCommitment)
blobEvent := &structs.BlobSidecarEvent{
@@ -213,38 +220,36 @@ func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, req
VersionedHash: versionedHash.String(),
KzgCommitment: hexutil.Encode(blobData.Blob.KzgCommitment),
}
send(w, flusher, BlobSidecarTopic, blobEvent)
return send(w, flusher, BlobSidecarTopic, blobEvent)
case operation.AttesterSlashingReceived:
if _, ok := requestedTopics[AttesterSlashingTopic]; !ok {
return
return nil
}
attesterSlashingData, ok := event.Data.(*operation.AttesterSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
}
send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
return send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
case operation.ProposerSlashingReceived:
if _, ok := requestedTopics[ProposerSlashingTopic]; !ok {
return
return nil
}
proposerSlashingData, ok := event.Data.(*operation.ProposerSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
}
send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
return send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
}
return nil
}

func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
switch event.Type {
case statefeed.NewHead:
if _, ok := requestedTopics[HeadTopic]; ok {
headData, ok := event.Data.(*ethpb.EventHead)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
}
head := &structs.HeadEvent{
Slot: fmt.Sprintf("%d", headData.Slot),
@@ -255,23 +260,22 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
PreviousDutyDependentRoot: hexutil.Encode(headData.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(headData.CurrentDutyDependentRoot),
}
send(w, flusher, HeadTopic, head)
return send(w, flusher, HeadTopic, head)
}
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
s.sendPayloadAttributes(ctx, w, flusher)
return s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.MissedSlot:
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
s.sendPayloadAttributes(ctx, w, flusher)
return s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.FinalizedCheckpoint:
if _, ok := requestedTopics[FinalizedCheckpointTopic]; !ok {
return
return nil
}
checkpointData, ok := event.Data.(*ethpb.EventFinalizedCheckpoint)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
}
checkpoint := &structs.FinalizedCheckpointEvent{
Block: hexutil.Encode(checkpointData.Block),
@@ -279,15 +283,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", checkpointData.Epoch),
ExecutionOptimistic: checkpointData.ExecutionOptimistic,
}
send(w, flusher, FinalizedCheckpointTopic, checkpoint)
return send(w, flusher, FinalizedCheckpointTopic, checkpoint)
case statefeed.LightClientFinalityUpdate:
if _, ok := requestedTopics[LightClientFinalityUpdateTopic]; !ok {
return
return nil
}
updateData, ok := event.Data.(*ethpbv2.LightClientFinalityUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
}

var finalityBranch []string
@@ -318,15 +321,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientFinalityUpdateTopic, update)
return send(w, flusher, LightClientFinalityUpdateTopic, update)
case statefeed.LightClientOptimisticUpdate:
if _, ok := requestedTopics[LightClientOptimisticUpdateTopic]; !ok {
return
return nil
}
updateData, ok := event.Data.(*ethpbv2.LightClientOptimisticUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
}
update := &structs.LightClientOptimisticUpdateEvent{
Version: version.String(int(updateData.Version)),
@@ -345,15 +347,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientOptimisticUpdateTopic, update)
return send(w, flusher, LightClientOptimisticUpdateTopic, update)
case statefeed.Reorg:
if _, ok := requestedTopics[ChainReorgTopic]; !ok {
return
return nil
}
reorgData, ok := event.Data.(*ethpb.EventChainReorg)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
}
reorg := &structs.ChainReorgEvent{
Slot: fmt.Sprintf("%d", reorgData.Slot),
@@ -365,78 +366,69 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", reorgData.Epoch),
ExecutionOptimistic: reorgData.ExecutionOptimistic,
}
send(w, flusher, ChainReorgTopic, reorg)
return send(w, flusher, ChainReorgTopic, reorg)
case statefeed.BlockProcessed:
if _, ok := requestedTopics[BlockTopic]; !ok {
return
return nil
}
blkData, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
}
blockRoot, err := blkData.SignedBlock.Block().HashTreeRoot()
if err != nil {
write(w, flusher, "Could not get block root: "+err.Error())
return
return write(w, flusher, "Could not get block root: "+err.Error())
}
blk := &structs.BlockEvent{
Slot: fmt.Sprintf("%d", blkData.Slot),
Block: hexutil.Encode(blockRoot[:]),
ExecutionOptimistic: blkData.Optimistic,
}
send(w, flusher, BlockTopic, blk)
return send(w, flusher, BlockTopic, blk)
}
return nil
}

// This event stream is intended to be used by builders and relays.
// Parent fields are based on state at N_{current_slot}, while the rest of fields are based on state of N_{current_slot + 1}
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) {
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) error {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
write(w, flusher, "Could not get head root: "+err.Error())
return
return write(w, flusher, "Could not get head root: "+err.Error())
}
st, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
write(w, flusher, "Could not get head state: "+err.Error())
return
return write(w, flusher, "Could not get head state: "+err.Error())
}
// advance the head state
headState, err := transition.ProcessSlotsIfPossible(ctx, st, s.ChainInfoFetcher.CurrentSlot()+1)
if err != nil {
write(w, flusher, "Could not advance head state: "+err.Error())
return
return write(w, flusher, "Could not advance head state: "+err.Error())
}

headBlock, err := s.HeadFetcher.HeadBlock(ctx)
if err != nil {
write(w, flusher, "Could not get head block: "+err.Error())
return
return write(w, flusher, "Could not get head block: "+err.Error())
}

headPayload, err := headBlock.Block().Body().Execution()
if err != nil {
write(w, flusher, "Could not get execution payload: "+err.Error())
return
return write(w, flusher, "Could not get execution payload: "+err.Error())
}

t, err := slots.ToTime(headState.GenesisTime(), headState.Slot())
if err != nil {
write(w, flusher, "Could not get head state slot time: "+err.Error())
return
return write(w, flusher, "Could not get head state slot time: "+err.Error())
}

prevRando, err := helpers.RandaoMix(headState, time.CurrentEpoch(headState))
if err != nil {
write(w, flusher, "Could not get head state randao mix: "+err.Error())
return
return write(w, flusher, "Could not get head state randao mix: "+err.Error())
}

proposerIndex, err := helpers.BeaconProposerIndex(ctx, headState)
if err != nil {
write(w, flusher, "Could not get head state proposer index: "+err.Error())
return
return write(w, flusher, "Could not get head state proposer index: "+err.Error())
}

var attributes interface{}
@@ -450,8 +442,7 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Capella:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
}
attributes = &structs.PayloadAttributesV2{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -462,13 +453,11 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Deneb:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
}
parentRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
write(w, flusher, "Could not get head block root: "+err.Error())
return
return write(w, flusher, "Could not get head block root: "+err.Error())
}
attributes = &structs.PayloadAttributesV3{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -478,14 +467,12 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
ParentBeaconBlockRoot: hexutil.Encode(parentRoot[:]),
}
default:
write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
return
return write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
}

attributesBytes, err := json.Marshal(attributes)
if err != nil {
write(w, flusher, err.Error())
return
return write(w, flusher, err.Error())
}
eventData := structs.PayloadAttributesEventData{
ProposerIndex: fmt.Sprintf("%d", proposerIndex),
@@ -497,32 +484,31 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
}
eventDataBytes, err := json.Marshal(eventData)
if err != nil {
write(w, flusher, err.Error())
return
return write(w, flusher, err.Error())
}
send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
return send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
Version: version.String(headState.Version()),
Data: eventDataBytes,
})
}

func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) {
func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) error {
j, err := json.Marshal(data)
if err != nil {
write(w, flusher, "Could not marshal event to JSON: "+err.Error())
return
return write(w, flusher, "Could not marshal event to JSON: "+err.Error())
}
write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
return write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
}

func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) {
write(w, flusher, ":\n\n")
func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) error {
return write(w, flusher, ":\n\n")
}

func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) {
func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) error {
_, err := fmt.Fprintf(w, format, a...)
if err != nil {
log.WithError(err).Error("Could not write to response writer")
return errors.Wrap(err, "could not write to response writer")
}
flusher.Flush()
return nil
}

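For reference, the event:/data: framing that send produces above is plain Server-Sent Events, so a consumer only needs to split frames on blank lines and ignore ":" comment lines (the keepalives). A minimal, hypothetical reader sketch, not a Prysm API:

package example

import (
	"bufio"
	"io"
	"strings"
)

// readEvents parses an SSE stream like the one written by send above,
// invoking handle(event, data) once per frame.
func readEvents(r io.Reader, handle func(event, data string)) error {
	scanner := bufio.NewScanner(r)
	var event, data string
	for scanner.Scan() {
		line := scanner.Text()
		switch {
		case line == "": // a blank line terminates a frame
			if event != "" || data != "" {
				handle(event, data)
			}
			event, data = "", ""
		case strings.HasPrefix(line, ":"): // keepalive/comment, skip
		case strings.HasPrefix(line, "event: "):
			event = strings.TrimPrefix(line, "event: ")
		case strings.HasPrefix(line, "data: "):
			data = strings.TrimPrefix(line, "data: ")
		}
	}
	return scanner.Err()
}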
@@ -18,8 +18,10 @@ go_library(
"@com_github_golang_protobuf//ptypes/timestamp",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
],
@@ -45,7 +47,9 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//reflection:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",

@@ -6,7 +6,9 @@ package node
import (
"context"
"fmt"
"net/http"
"sort"
"strconv"
"time"

"github.com/golang/protobuf/ptypes/empty"
@@ -21,8 +23,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/logs"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"go.opencensus.io/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
)
@@ -45,6 +49,35 @@ type Server struct {
BeaconMonitoringPort int
}

// GetHealth checks the health of the node
func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "node.GetHealth")
defer span.End()

// Set a timeout for the health check operation
timeoutDuration := 10 * time.Second
ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
defer cancel() // Important to avoid a context leak

if ns.SyncChecker.Synced() {
return &empty.Empty{}, nil
}
if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
if request.SyncingStatus != 0 {
// override the 200 success with the provided request status
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}
return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
}

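The handler above maps sync state onto HTTP codes through the grpc-gateway x-http-code header: synced nodes answer 200, syncing or initialized nodes answer 206 (or the caller-supplied syncing_status), and anything else is 503. Assuming the gateway exposes this at the standard Beacon API health path (an assumption about deployment, not shown in the diff), a probe might look like:

package example

import (
	"fmt"
	"net/http"
)

// checkHealth probes a beacon node's health endpoint. The path and query
// parameter follow the standard Eth Beacon API; adjust if your deployment
// differs. A 206 (or the custom syncing status) means "up but still syncing".
func checkHealth(baseURL string, syncingStatus int) (int, error) {
	url := fmt.Sprintf("%s/eth/v1/node/health?syncing_status=%d", baseURL, syncingStatus)
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}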
// GetSyncStatus checks the current network sync status of the node.
func (ns *Server) GetSyncStatus(_ context.Context, _ *empty.Empty) (*ethpb.SyncStatus, error) {
return &ethpb.SyncStatus{

@@ -3,12 +3,14 @@ package node
import (
"context"
"errors"
"fmt"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
@@ -22,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/reflection"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@@ -170,3 +173,53 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
assert.Equal(t, ep, res.CurrentAddress)
assert.Equal(t, errStr, res.CurrentConnectionError)
}

func TestNodeServer_GetHealth(t *testing.T) {
tests := []struct {
name string
input *mockSync.Sync
customStatus uint64
wantedErr string
}{
{
name: "happy path",
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
},
{
name: "syncing",
input: &mockSync.Sync{IsSyncing: false},
wantedErr: "service unavailable",
},
{
name: "custom sync status",
input: &mockSync.Sync{IsSyncing: true},
customStatus: 206,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
server := grpc.NewServer()
ns := &Server{
SyncChecker: tt.input,
}
ethpb.RegisterNodeServer(server, ns)
reflection.Register(server)
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
if tt.wantedErr == "" {
require.NoError(t, err)
return
}
if tt.customStatus != 0 {
// Assuming the call was successful, now extract the headers
headers, _ := metadata.FromIncomingContext(ctx)
// Check for the specific header
values, ok := headers["x-http-code"]
require.Equal(t, true, ok && len(values) > 0)
require.Equal(t, fmt.Sprintf("%d", tt.customStatus), values[0])

}
require.ErrorContains(t, tt.wantedErr, err)
})
}
}

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -39,6 +40,12 @@ var (
})
)

func setFeeRecipientIfBurnAddress(val *cache.TrackedValidator) {
if val.FeeRecipient == primitives.ExecutionAddress([20]byte{}) && val.Index == 0 {
val.FeeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
}
}

// This returns the local execution payload of a given slot. The function has full awareness of pre and post merge.
func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) (interfaces.ExecutionData, bool, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getLocalPayload")
@@ -62,6 +69,7 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
if !tracked {
logrus.WithFields(logFields).Warn("could not find tracked proposer index")
}
setFeeRecipientIfBurnAddress(&val)

var err error
if ok && payloadId != [8]byte{} {

@@ -383,3 +383,16 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
})
}
}

func TestSetFeeRecipientIfBurnAddress(t *testing.T) {
val := &cache.TrackedValidator{Index: 1}
cfg := params.BeaconConfig().Copy()
cfg.DefaultFeeRecipient = common.Address([20]byte{'a'})
params.OverrideBeaconConfig(cfg)
require.NotEqual(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
setFeeRecipientIfBurnAddress(val)
require.NotEqual(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
val.Index = 0
setFeeRecipientIfBurnAddress(val)
require.Equal(t, common.Address(val.FeeRecipient), params.BeaconConfig().DefaultFeeRecipient)
}

@@ -2697,10 +2697,13 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) {
BeaconDB: db,
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}
require.Equal(t, false, proposerServer.TrackedValidatorsCache.Validating())
_, err := proposerServer.PrepareBeaconProposer(ctx, tt.args.request)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
} else {
require.Equal(t, true, proposerServer.TrackedValidatorsCache.Validating())
}
require.NoError(t, err)
val, tracked := proposerServer.TrackedValidatorsCache.Validator(1)

@@ -189,7 +189,7 @@ func TestLoadBlocks_FirstBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[len(roots)-1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 9, roots[len(roots)-1])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -220,7 +220,7 @@ func TestLoadBlocks_SecondBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 5, roots[5])
filteredBlocks, err := s.loadBlocks(ctx, 0, 6, roots[5])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -249,7 +249,7 @@ func TestLoadBlocks_ThirdBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 7, roots[7])
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[7])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -280,7 +280,7 @@ func TestLoadBlocks_SameSlots(t *testing.T) {
roots, savedBlocks, err := tree2(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[6])
filteredBlocks, err := s.loadBlocks(ctx, 0, 4, roots[6])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -309,7 +309,7 @@ func TestLoadBlocks_SameEndSlots(t *testing.T) {
roots, savedBlocks, err := tree3(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[2])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[2])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -337,7 +337,7 @@ func TestLoadBlocks_SameEndSlotsWith2blocks(t *testing.T) {
roots, savedBlocks, err := tree4(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)

filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[1])
require.NoError(t, err)

wanted := []*ethpb.SignedBeaconBlock{
@@ -363,7 +363,7 @@ func TestLoadBlocks_BadStart(t *testing.T) {

roots, _, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
_, err = s.loadBlocks(ctx, 0, 5, roots[8])
_, err = s.loadBlocks(ctx, 0, 6, roots[8])
assert.ErrorContains(t, "end block roots don't match", err)
}

@@ -374,63 +374,63 @@ func TestLoadBlocks_BadStart(t *testing.T) {
// \- B7
func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
b2.Block.Slot = 3
b2.Block.ParentRoot = r1[:]
r2, err := b2.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r1[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b4 := util.NewBeaconBlock()
b4.Block.Slot = 4
b4.Block.Slot = 5
b4.Block.ParentRoot = r2[:]
r4, err := b4.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b5 := util.NewBeaconBlock()
b5.Block.Slot = 5
b5.Block.Slot = 6
b5.Block.ParentRoot = r3[:]
r5, err := b5.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b6 := util.NewBeaconBlock()
b6.Block.Slot = 6
b6.Block.Slot = 7
b6.Block.ParentRoot = r4[:]
r6, err := b6.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b7 := util.NewBeaconBlock()
b7.Block.Slot = 7
b7.Block.Slot = 8
b7.Block.ParentRoot = r6[:]
r7, err := b7.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b8 := util.NewBeaconBlock()
b8.Block.Slot = 8
b8.Block.Slot = 9
b8.Block.ParentRoot = r6[:]
r8, err := b8.Block.HashTreeRoot()
if err != nil {
@@ -466,21 +466,21 @@ func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2 -- B3
func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -488,7 +488,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -496,7 +496,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -504,7 +504,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -512,7 +512,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r24[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
@@ -549,21 +549,21 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -571,7 +571,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -579,7 +579,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -587,7 +587,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -626,14 +626,14 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r0[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -641,7 +641,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r0[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -649,7 +649,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r0[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -657,7 +657,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r0[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -697,17 +697,17 @@ func TestLoadFinalizedBlocks(t *testing.T) {
gRoot, err := gBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, gBlock)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
roots, _, err := tree1(t, beaconDB, gRoot[:])
require.NoError(t, err)

filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 0, len(filteredBlocks))
require.Equal(t, 1, len(filteredBlocks))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: roots[8][:]}))

require.NoError(t, s.beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: roots[8][:]}))
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 10, len(filteredBlocks))
require.Equal(t, 7, len(filteredBlocks))
}

@@ -79,6 +79,10 @@ func (c *batchSequencer) update(b batch) {
// so we want to copy c to a, then on i=3, d to b, then on i=4 e to c.
c.seq[i-done] = c.seq[i]
}
if done == 1 && len(c.seq) == 1 {
c.seq[0] = c.batcher.beforeBatch(c.seq[0])
return
}
// Overwrite the moved batches with the next ones in the sequence.
// Continuing the example in the comment above, len(c.seq)==5, done=2, so i=3.
// We want to replace index 3 with the batch that should be processed after index 2,

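The fix above handles the degenerate window where done == 1 and len(c.seq) == 1: the shift loop never runs, so the lone slot must be re-primed directly via beforeBatch. The shift itself is the standard compact-then-refill pattern over a fixed-size window; a self-contained sketch of that pattern (integers standing in for batches, next() for the sequencer's "next batch" logic):

package example

// compactAndRefill drops the first `done` finished items from a fixed-size
// window by shifting the survivors left, then refills the freed tail slots
// with next(). This mirrors the c.seq bookkeeping in update above.
func compactAndRefill(window []int, done int, next func() int) {
	if done <= 0 {
		return
	}
	if done > len(window) {
		done = len(window)
	}
	// Shift unfinished items to the front (the c.seq[i-done] = c.seq[i] loop).
	for i := done; i < len(window); i++ {
		window[i-done] = window[i]
	}
	// Refill the tail with new work. When done == len(window), the loop above
	// did nothing and every slot is refreshed, the edge the fix addresses.
	for i := len(window) - done; i < len(window); i++ {
		window[i] = next()
	}
}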
@@ -64,6 +64,35 @@ func TestBatcherBefore(t *testing.T) {
}
}

func TestBatchSingleItem(t *testing.T) {
var min, max, size primitives.Slot
// seqLen = 1 means just one worker
seqLen := 1
min = 0
max = 11235
size = 64
seq := newBatchSequencer(seqLen, min, max, size)
got, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(got))
b := got[0]

// calling sequence again should give you the next (earlier) batch
seq.update(b.withState(batchImportComplete))
next, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(next))
require.Equal(t, b.end, next[0].end+size)

// should get the same batch again when update is called with an error
seq.update(next[0].withState(batchErrRetryable))
same, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(same))
require.Equal(t, next[0].begin, same[0].begin)
require.Equal(t, next[0].end, same[0].end)
}

func TestBatchSequencer(t *testing.T) {
var min, max, size primitives.Slot
seqLen := 8

@@ -118,6 +118,24 @@ func WithVerifierWaiter(viw InitializerWaiter) ServiceOption {
}
}

// WithMinimumSlot allows the user to specify a different backfill minimum slot than the spec default of current - MIN_EPOCHS_FOR_BLOCK_REQUESTS.
// If this value is greater than current - MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored with a warning log.
func WithMinimumSlot(s primitives.Slot) ServiceOption {
ms := func(current primitives.Slot) primitives.Slot {
specMin := minimumBackfillSlot(current)
if s < specMin {
return s
}
log.WithField("userSlot", s).WithField("specMinSlot", specMin).
Warn("Ignoring user-specified slot > MIN_EPOCHS_FOR_BLOCK_REQUESTS.")
return specMin
}
return func(s *Service) error {
s.ms = ms
return nil
}
}

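Since WithMinimumSlot is just a ServiceOption, callers thread it through NewService like any other option. A hedged sketch, assuming it sits alongside NewService in the backfill package so the parameter types already in scope there apply; newBackfillWithFloor itself is hypothetical:

// newBackfillWithFloor wires the option in; every parameter except floor is
// the plumbing NewService already takes (names as in the signature below),
// and floor is only honored when it is older than the spec minimum —
// otherwise the option logs a warning and clamps to the spec value.
func newBackfillWithFloor(ctx context.Context, su *Store, bStore *filesystem.BlobStorage,
	cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, floor primitives.Slot) (*Service, error) {
	return NewService(ctx, su, bStore, cw, p, pa, WithMinimumSlot(floor))
}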
// NewService initializes the backfill Service. Like all implementations of the Service interface,
// the service won't begin its runloop until Start() is called.
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
@@ -289,7 +307,7 @@ func (s *Service) Start() {
if ctx.Err() != nil {
return
}
if allComplete := s.updateComplete(); allComplete {
if s.updateComplete() {
return
}
s.importBatches(ctx)
@@ -316,11 +334,11 @@ func (s *Service) downscore(b batch) {
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(b.blockPid)
}

func (s *Service) Stop() error {
func (*Service) Stop() error {
return nil
}

func (s *Service) Status() error {
func (*Service) Status() error {
return nil
}


@@ -29,7 +29,7 @@ func (m mockMinimumSlotter) minimumSlot(_ primitives.Slot) primitives.Slot {
type mockInitalizerWaiter struct {
}

func (mi *mockInitalizerWaiter) WaitForInitializer(ctx context.Context) (*verification.Initializer, error) {
func (*mockInitalizerWaiter) WaitForInitializer(_ context.Context) (*verification.Initializer, error) {
return &verification.Initializer{}, nil
}

@@ -67,7 +67,7 @@ func TestServiceInit(t *testing.T) {
}
go srv.Start()
todo := make([]batch, 0)
todo = testReadN(t, ctx, pool.todoChan, nWorkers, todo)
todo = testReadN(ctx, t, pool.todoChan, nWorkers, todo)
require.Equal(t, nWorkers, len(todo))
for i := 0; i < remaining; i++ {
b := todo[i]
@@ -75,7 +75,7 @@ func TestServiceInit(t *testing.T) {
b.state = batchImportable
}
pool.finishedChan <- b
todo = testReadN(t, ctx, pool.todoChan, 1, todo)
todo = testReadN(ctx, t, pool.todoChan, 1, todo)
}
require.Equal(t, remaining+nWorkers, len(todo))
for i := remaining; i < remaining+nWorkers; i++ {
@@ -90,14 +90,12 @@ func TestMinimumBackfillSlot(t *testing.T) {
minSlot := minimumBackfillSlot(primitives.Slot(currSlot))
require.Equal(t, 100*params.BeaconConfig().SlotsPerEpoch, minSlot)

oe = helpers.MinEpochsForBlockRequests()

currSlot = oe.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
minSlot = minimumBackfillSlot(primitives.Slot(currSlot))
require.Equal(t, primitives.Slot(1), minSlot)
}

func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []batch) []batch {
func testReadN(ctx context.Context, t *testing.T, c chan batch, n int, into []batch) []batch {
for i := 0; i < n; i++ {
select {
case b := <-c:
@@ -109,3 +107,28 @@ func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []ba
}
return into
}

func TestBackfillMinSlotDefault(t *testing.T) {
oe := helpers.MinEpochsForBlockRequests()
current := primitives.Slot((oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
s := &Service{}
specMin := minimumBackfillSlot(current)

t.Run("equal to specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin)
require.NoError(t, opt(s))
require.Equal(t, specMin, s.ms(current))
})
t.Run("older than specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin - 1)
require.NoError(t, opt(s))
// if WithMinimumSlot is older than the spec minimum, we should use it.
require.Equal(t, specMin-1, s.ms(current))
})
t.Run("newer than specMin", func(t *testing.T) {
opt := WithMinimumSlot(specMin + 1)
require.NoError(t, opt(s))
// if WithMinimumSlot is newer than the spec minimum, we should use the spec minimum
require.Equal(t, specMin, s.ms(current))
})
}

@@ -45,7 +45,7 @@ func (w *p2pWorker) run(ctx context.Context) {

func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
cs := w.c.CurrentSlot()
blobRetentionStart, err := sync.BlobsByRangeMinStartSlot(cs)
blobRetentionStart, err := sync.BlobRPCMinValidSlot(cs)
if err != nil {
return b.withRetryableError(errors.Wrap(err, "configuration issue, could not compute minimum blob retention slot"))
}

@@ -327,40 +327,3 @@ func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
require.Equal(t, true, found != nil)
}
}

func TestRoundTripDenebSave(t *testing.T) {
ctx := context.Background()
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
parentRoot := [32]byte{}
c := blobsTestCase{}
chain, clock := defaultMockChain(t)
c.chain = chain
c.clock = clock
oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.clock.CurrentSlot())))
require.NoError(t, err)
maxBlobs := fieldparams.MaxBlobsPerBlock
block, bsc := generateTestBlockWithSidecars(t, parentRoot, oldest, maxBlobs)
require.Equal(t, len(block.Block.Body.BlobKzgCommitments), len(bsc))
require.Equal(t, maxBlobs, len(bsc))
for i := range bsc {
require.DeepEqual(t, block.Block.Body.BlobKzgCommitments[i], bsc[i].KzgCommitment)
}
d := db.SetupDB(t)
util.SaveBlock(t, ctx, d, block)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
dbBlock, err := d.Block(ctx, root)
require.NoError(t, err)
comms, err := dbBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, maxBlobs, len(comms))
for i := range bsc {
require.DeepEqual(t, comms[i], bsc[i].KzgCommitment)
}
}

@@ -12,14 +12,12 @@ go_library(
"log.go",
"round_robin.go",
"service.go",
- "verification.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain:go_default_library",
- "//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -41,7 +39,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
@@ -478,7 +478,7 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
return bwb, nil
}
- blobWindowStart, err := prysmsync.BlobsByRangeMinStartSlot(f.clock.CurrentSlot())
+ blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
if err != nil {
return nil, err
}
@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
+ "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -167,7 +168,7 @@ func (s *Service) processFetchedDataRegSync(
if len(bwb) == 0 {
return
}
- bv := newBlobBatchVerifier(s.newBlobVerifier)
+ bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
@@ -326,7 +327,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
}

- bv := newBlobBatchVerifier(s.newBlobVerifier)
+ bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(genesis, first, len(bwb))
for _, bb := range bwb {
@@ -340,7 +340,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
if len(sidecars) != len(req) {
continue
}
- bv := newBlobBatchVerifier(s.newBlobVerifier)
+ bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
if err := avs.Persist(current, sidecars...); err != nil {
@@ -362,3 +362,9 @@ func shufflePeers(pids []peer.ID) {
pids[i], pids[j] = pids[j], pids[i]
})
}
+
+ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
+ return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
+ return ini.NewBlobVerifier(b, reqs)
+ }
+ }
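The same three-step wiring now appears at each call site above: construct a batch verifier with explicit requirements, hand it to the lazily persistent availability store, and persist the fetched sidecars. A self-contained sketch of that shape, with toy stand-ins for the verification and das types (none of these are the real Prysm APIs):

package main

import "fmt"

type Sidecar struct{ Index int }
type VerifiedSidecar struct{ Sidecar }

// BatchVerifier mirrors the shape of verification.NewBlobBatchVerifier's
// result: it is parameterized by requirements at construction time.
type BatchVerifier struct{ reqs []string }

func NewBatchVerifier(reqs []string) *BatchVerifier { return &BatchVerifier{reqs: reqs} }

func (v *BatchVerifier) Verify(scs []Sidecar) ([]VerifiedSidecar, error) {
	out := make([]VerifiedSidecar, len(scs))
	for i, sc := range scs {
		// A real verifier would enforce v.reqs here before promoting the type.
		out[i] = VerifiedSidecar{sc}
	}
	return out, nil
}

// Store mirrors das.NewLazilyPersistentStore: it owns a verifier and only
// persists sidecars that pass verification.
type Store struct{ bv *BatchVerifier }

func NewStore(bv *BatchVerifier) *Store { return &Store{bv: bv} }

func (s *Store) Persist(current uint64, scs ...Sidecar) error {
	vscs, err := s.bv.Verify(scs)
	if err != nil {
		return err
	}
	fmt.Printf("persisted %d verified sidecars at slot %d\n", len(vscs), current)
	return nil
}

func main() {
	bv := NewBatchVerifier([]string{"kzg", "signature"}) // requirements chosen per call site
	avs := NewStore(bv)
	if err := avs.Persist(42, Sidecar{0}, Sidecar{1}); err != nil {
		panic(err)
	}
}

The design point is that the requirements now travel with the verifier rather than being hard-coded inside it, which is what lets init-sync, backfill, and the pending queue reuse one implementation.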
@@ -144,6 +144,12 @@ var (
Help: "Time for gossiped blob sidecars to arrive",
},
)
+ blobSidecarVerificationGossipSummary = promauto.NewSummary(
+ prometheus.SummaryOpts{
+ Name: "gossip_blob_sidecar_verification_milliseconds",
+ Help: "Time to verify gossiped blob sidecars",
+ },
+ )

// Sync committee verification performance.
syncMessagesForUnknownBlocks = promauto.NewCounter(
@@ -151,14 +151,14 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
if len(sidecars) != len(request) {
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
}
+ bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueSidecarRequirements)
for _, sidecar := range sidecars {
if err := verify.BlobAlignsWithBlock(sidecar, RoBlock); err != nil {
return err
}
log.WithFields(blobFields(sidecar)).Debug("Received blob sidecar RPC")
}

- vscs, err := verification.BlobSidecarSliceNoop(sidecars)
+ vscs, err := bv.VerifiedROBlobs(ctx, RoBlock, sidecars)
if err != nil {
return err
}
@@ -123,10 +123,10 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
return nil
}

- // BlobsByRangeMinStartSlot returns the lowest slot that we should expect peers to respect as the
+ // BlobRPCMinValidSlot returns the lowest slot that we should expect peers to respect as the
// start slot in a BlobSidecarsByRange request. This can be used to validate incoming requests and
// to avoid pestering peers with requests for blobs that are outside the retention window.
- func BlobsByRangeMinStartSlot(current primitives.Slot) (primitives.Slot, error) {
+ func BlobRPCMinValidSlot(current primitives.Slot) (primitives.Slot, error) {
// Avoid overflow if we're running on a config where deneb is set to far future epoch.
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 {
return primitives.Slot(math.MaxUint64), nil
@@ -176,9 +176,9 @@ func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.S
// [max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]
// where current_epoch is defined by the current wall-clock time,
// and clients MUST support serving requests of blobs on this range.
- minStartSlot, err := BlobsByRangeMinStartSlot(current)
+ minStartSlot, err := BlobRPCMinValidSlot(current)
if err != nil {
- return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "BlobsByRangeMinStartSlot error")
+ return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "BlobRPCMinValidSlot error")
}
if rp.start > maxStart {
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "start > maxStart")
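For intuition, the spec rule quoted in the comment above reduces to simple epoch arithmetic: the minimum valid request slot is the start of max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), with care taken not to underflow before the window fills. A self-contained sketch of that computation; the constants are mainnet-style placeholders rather than values read from the real config:

package main

import "fmt"

type Slot uint64
type Epoch uint64

const (
	slotsPerEpoch                  = 32
	minEpochsForBlobRequests Epoch = 4096   // MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
	denebForkEpoch           Epoch = 269568 // mainnet Deneb epoch, used as a placeholder
)

func epochStart(e Epoch) Slot { return Slot(e) * slotsPerEpoch }

// blobRPCMinValidSlot mirrors the shape of the renamed helper: the oldest
// slot a peer can be expected to serve blobs for, given the current slot.
func blobRPCMinValidSlot(current Slot) Slot {
	currentEpoch := Epoch(current / slotsPerEpoch)
	// Guard the subtraction: until the window fills up, the floor is deneb.
	var windowStart Epoch
	if currentEpoch > minEpochsForBlobRequests {
		windowStart = currentEpoch - minEpochsForBlobRequests
	}
	if windowStart < denebForkEpoch {
		windowStart = denebForkEpoch
	}
	return epochStart(windowStart)
}

func main() {
	// One epoch past deneb+window: the floor advances by one epoch of slots,
	// matching the "expiry starts one epoch after" test case below.
	cur := epochStart(denebForkEpoch + minEpochsForBlobRequests + 1)
	fmt.Println(blobRPCMinValidSlot(cur) == epochStart(denebForkEpoch+1)) // true
}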
@@ -178,7 +178,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
and clients MUST support serving requests of blobs on this range.
*/
defaultCurrent := denebSlot + 100 + minReqSlots
- defaultMinStart, err := BlobsByRangeMinStartSlot(defaultCurrent)
+ defaultMinStart, err := BlobRPCMinValidSlot(defaultCurrent)
require.NoError(t, err)
cases := []struct {
name string
@@ -285,3 +285,67 @@ func TestBlobsByRangeValidation(t *testing.T) {
})
}
}
+
+ func TestBlobRPCMinValidSlot(t *testing.T) {
+ denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
+ require.NoError(t, err)
+ cases := []struct {
+ name string
+ current func(t *testing.T) types.Slot
+ expected types.Slot
+ err error
+ }{
+ {
+ name: "before deneb",
+ current: func(t *testing.T) types.Slot {
+ st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch - 1)
+ // note: we no longer need to deal with deneb fork epoch being far future
+ require.NoError(t, err)
+ return st
+ },
+ expected: denebSlot,
+ },
+ {
+ name: "equal to deneb",
+ current: func(t *testing.T) types.Slot {
+ st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
+ // note: we no longer need to deal with deneb fork epoch being far future
+ require.NoError(t, err)
+ return st
+ },
+ expected: denebSlot,
+ },
+ {
+ name: "after deneb, before expiry starts",
+ current: func(t *testing.T) types.Slot {
+ st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
+ // note: we no longer need to deal with deneb fork epoch being far future
+ require.NoError(t, err)
+ return st
+ },
+ expected: denebSlot,
+ },
+ {
+ name: "expiry starts one epoch after deneb + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
+ current: func(t *testing.T) types.Slot {
+ st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1)
+ // note: we no longer need to deal with deneb fork epoch being far future
+ require.NoError(t, err)
+ return st
+ },
+ expected: denebSlot + params.BeaconConfig().SlotsPerEpoch,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ current := c.current(t)
+ got, err := BlobRPCMinValidSlot(current)
+ if c.err != nil {
+ require.ErrorIs(t, err, c.err)
+ return
+ }
+ require.NoError(t, err)
+ require.Equal(t, c.expected, got)
+ })
+ }
+ }
@@ -13,30 +13,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

- func blobMinReqEpoch(finalized, current primitives.Epoch) primitives.Epoch {
- // max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
- denebFork := params.BeaconConfig().DenebForkEpoch
- var reqWindow primitives.Epoch
- if current > params.BeaconConfig().MinEpochsForBlobsSidecarsRequest {
- reqWindow = current - params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
- }
- if finalized >= reqWindow && finalized > denebFork {
- return finalized
- }
- if reqWindow >= finalized && reqWindow > denebFork {
- return reqWindow
- }
- return denebFork
- }

// blobSidecarByRootRPCHandler handles the /eth2/beacon_chain/req/blob_sidecars_by_root/1/ RPC request.
// spec: https://github.com/ethereum/consensus-specs/blob/a7e45db9ac2b60a33e144444969ad3ac0aae3d4c/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
@@ -65,7 +47,13 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
if len(blobIdents) > batchSize {
ticker = time.NewTicker(time.Second)
}
- minReqEpoch := blobMinReqEpoch(s.cfg.chain.FinalizedCheckpt().Epoch, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
+
+ // Compute the oldest slot we'll allow a peer to request, based on the current slot.
+ cs := s.cfg.clock.CurrentSlot()
+ minReqSlot, err := BlobRPCMinValidSlot(cs)
+ if err != nil {
+ return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
+ }

for i := range blobIdents {
if err := ctx.Err(); err != nil {
@@ -95,12 +83,15 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface

// If any root in the request content references a block earlier than minimum_request_epoch,
// peers MAY respond with error code 3: ResourceUnavailable or not include the blob in the response.
- if slots.ToEpoch(sc.Slot()) < minReqEpoch {
+ // note: we are deviating from the spec to allow requests for blobs that are before minimum_request_epoch,
+ // up to the beginning of the retention period.
+ if sc.Slot() < minReqSlot {
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
log.WithError(types.ErrBlobLTMinRequest).
Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
return types.ErrBlobLTMinRequest
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
@@ -19,7 +19,7 @@ import (
)

func (c *blobsTestCase) defaultOldestSlotByRoot(t *testing.T) types.Slot {
- oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.clock.CurrentSlot())))
+ oldest, err := BlobRPCMinValidSlot(c.clock.CurrentSlot())
require.NoError(t, err)
return oldest
}
@@ -259,71 +259,3 @@ func TestBlobsByRootOK(t *testing.T) {
})
}
}
-
- func TestBlobsByRootMinReqEpoch(t *testing.T) {
- winMin := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
- cases := []struct {
- name string
- finalized types.Epoch
- current types.Epoch
- deneb types.Epoch
- expected types.Epoch
- }{
- {
- name: "testnet genesis",
- deneb: 100,
- current: 0,
- finalized: 0,
- expected: 100,
- },
- {
- name: "underflow averted",
- deneb: 100,
- current: winMin - 1,
- finalized: 0,
- expected: 100,
- },
- {
- name: "underflow averted - finalized is higher",
- deneb: 100,
- current: winMin - 1,
- finalized: winMin - 2,
- expected: winMin - 2,
- },
- {
- name: "underflow averted - genesis at deneb",
- deneb: 0,
- current: winMin - 1,
- finalized: 0,
- expected: 0,
- },
- {
- name: "max is finalized",
- deneb: 100,
- current: 99 + winMin,
- finalized: 101,
- expected: 101,
- },
- {
- name: "reqWindow > finalized, reqWindow < deneb",
- deneb: 100,
- current: 99 + winMin,
- finalized: 98,
- expected: 100,
- },
- }
- for _, c := range cases {
- t.Run(c.name, func(t *testing.T) {
- cfg := params.BeaconConfig()
- repositionFutureEpochs(cfg)
- cfg.DenebForkEpoch = c.deneb
- undo, err := params.SetActiveWithUndo(cfg)
- require.NoError(t, err)
- defer func() {
- require.NoError(t, undo())
- }()
- ep := blobMinReqEpoch(c.finalized, c.current)
- require.Equal(t, c.expected, ep)
- })
- }
- }
@@ -42,9 +42,10 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
return fmt.Errorf("wrong message type for goodbye, got %T, wanted *uint64", msg)
}
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
- return err
+ log.WithError(err).Warn("Goodbye message from rate-limited peer.")
+ } else {
+ s.rateLimiter.add(stream, 1)
}
- s.rateLimiter.add(stream, 1)
log := log.WithField("Reason", goodbyeMessage(*m))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Peer has sent a goodbye message")
s.cfg.p2p.Peers().SetNextValidTime(stream.Conn().RemotePeer(), goodByeBackoff(*m))
@@ -53,7 +53,7 @@ const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
const seenUnaggregatedAttSize = 20000
- const seenAggregatedAttSize = 1024
+ const seenAggregatedAttSize = 16384
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
const seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
const seenExitSize = 100
@@ -321,7 +321,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
}
}

- // subscribe to a static subnet with the given topic and index.A given validator and subscription handler is
+ // subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte, subnetCount uint64) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
@@ -3,17 +3,21 @@ package sync
import (
"context"
"fmt"
"os"
"path"
"strings"

pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/io/file"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -109,6 +113,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
}

if err := vf.SidecarKzgProofVerified(); err != nil {
+ saveInvalidBlobToTemp(blob)
return pubsub.ValidationReject, err
}
@@ -118,10 +123,12 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes

fields := blobFields(blob)
sinceSlotStartTime := receivedTime.Sub(startTime)
+ validationTime := s.cfg.clock.Now().Sub(receivedTime)
fields["sinceSlotStartTime"] = sinceSlotStartTime
- fields["validationTime"] = s.cfg.clock.Now().Sub(receivedTime)
+ fields["validationTime"] = validationTime
log.WithFields(fields).Debug("Received blob sidecar gossip")

+ blobSidecarVerificationGossipSummary.Observe(float64(validationTime.Milliseconds()))
blobSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))

vBlobData, err := vf.VerifiedROBlob()
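The reworked timing code above splits two distinct durations: time from the slot's start until the sidecar arrived over gossip, and time spent validating it after arrival. A small self-contained sketch of that split; the wiring here is illustrative, only the two-duration decomposition matches the code above:

package main

import (
	"fmt"
	"time"
)

func main() {
	slotStart := time.Now().Add(-3 * time.Second) // pretend the slot began 3s ago
	receivedTime := time.Now().Add(-20 * time.Millisecond)

	// Duration from slot start to arrival: network/gossip propagation delay.
	sinceSlotStartTime := receivedTime.Sub(slotStart)
	// Duration from arrival to now: local validation cost.
	validationTime := time.Since(receivedTime)

	// In the real code these feed two separate prometheus summaries,
	// observed in milliseconds.
	fmt.Printf("arrival delay: %dms, validation: %dms\n",
		sinceSlotStartTime.Milliseconds(), validationTime.Milliseconds())
}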
@@ -165,3 +172,21 @@ func blobFields(b blocks.ROBlob) logrus.Fields {
func computeSubnetForBlobSidecar(index uint64) uint64 {
return index % params.BeaconConfig().BlobsidecarSubnetCount
}
+
+ // saveInvalidBlobToTemp as a block ssz. Writes to temp directory.
+ func saveInvalidBlobToTemp(b blocks.ROBlob) {
+ if !features.Get().SaveInvalidBlob {
+ return
+ }
+ filename := fmt.Sprintf("blob_sidecar_%#x_%d_%d.ssz", b.BlockRoot(), b.Slot(), b.Index)
+ fp := path.Join(os.TempDir(), filename)
+ log.Warnf("Writing invalid blob sidecar to disk at %s", fp)
+ enc, err := b.MarshalSSZ()
+ if err != nil {
+ log.WithError(err).Error("Failed to ssz encode blob sidecar")
+ return
+ }
+ if err := file.WriteFile(fp, enc); err != nil {
+ log.WithError(err).Error("Failed to write to disk")
+ }
+ }
@@ -3,12 +3,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
+ "batch.go",
"blob.go",
"cache.go",
"error.go",
"fake.go",
"initializer.go",
"interface.go",
+ "metrics.go",
"mock.go",
"result.go",
],
@@ -35,6 +37,8 @@ go_library(
"//time/slots:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
+ "@com_github_prometheus_client_golang//prometheus:go_default_library",
+ "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -42,6 +46,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
+ "batch_test.go",
"blob_test.go",
"cache_test.go",
"initializer_test.go",
@@ -66,5 +71,6 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
+ "@com_github_pkg_errors//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
],
)
@@ -1,12 +1,10 @@
- package initialsync
+ package verification

import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
- "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
)
@@ -20,21 +18,17 @@ var (
ErrBatchBlockRootMismatch = errors.New("Sidecar block header root does not match signed block")
)

- func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
- return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
- return ini.NewBlobVerifier(b, reqs)
- }
- }
-
- func newBlobBatchVerifier(newVerifier verification.NewBlobVerifier) *BlobBatchVerifier {
+ // NewBlobBatchVerifier initializes a blob batch verifier. It requires the caller to correctly specify
+ // verification Requirements and to also pass in a NewBlobVerifier, which is a callback function that
+ // returns a new BlobVerifier for handling a single blob in the batch.
+ func NewBlobBatchVerifier(newVerifier NewBlobVerifier, reqs []Requirement) *BlobBatchVerifier {
return &BlobBatchVerifier{
verifyKzg: kzg.Verify,
newVerifier: newVerifier,
+ reqs: reqs,
}
}

- type kzgVerifier func(b ...blocks.ROBlob) error
-
// BlobBatchVerifier solves problems that come from verifying batches of blobs from RPC.
// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch
// won't be in forkchoice yet.
@@ -42,18 +36,17 @@ type kzgVerifier func(b ...blocks.ROBlob) error
// method to BlobVerifier to verify the kzg commitments of all blob sidecars for a block together, then using the cached
// result of the batch verification when verifying the individual blobs.
type BlobBatchVerifier struct {
- verifyKzg kzgVerifier
- newVerifier verification.NewBlobVerifier
+ verifyKzg roblobCommitmentVerifier
+ newVerifier NewBlobVerifier
+ reqs []Requirement
}

var _ das.BlobBatchVerifier = &BlobBatchVerifier{}

// VerifiedROBlobs satisfies the das.BlobBatchVerifier interface, used by das.AvailabilityStore.
func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
if len(scs) == 0 {
return nil, nil
}
- // We assume the proposer was validated wrt the block in batch block processing before performing the DA check.
+ // We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
// So at this stage we just need to make sure the value being signed and signature bytes match the block.
for i := range scs {
if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) {
@@ -71,7 +64,7 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
}
vs := make([]blocks.VerifiedROBlob, len(scs))
for i := range scs {
- vb, err := batch.verifyOneBlob(ctx, scs[i])
+ vb, err := batch.verifyOneBlob(scs[i])
if err != nil {
return nil, err
}
@@ -80,13 +73,13 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
return vs, nil
}

- func (batch *BlobBatchVerifier) verifyOneBlob(ctx context.Context, sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
+ func (batch *BlobBatchVerifier) verifyOneBlob(sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
vb := blocks.VerifiedROBlob{}
- bv := batch.newVerifier(sc, verification.InitsyncSidecarRequirements)
+ bv := batch.newVerifier(sc, batch.reqs)
// We can satisfy the following 2 requirements immediately because VerifiedROBlobs always verifies commitments
// and block signature for all blobs in the batch before calling verifyOneBlob.
- bv.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
- bv.SatisfyRequirement(verification.RequireValidProposerSignature)
+ bv.SatisfyRequirement(RequireSidecarKzgProofVerified)
+ bv.SatisfyRequirement(RequireValidProposerSignature)

if err := bv.BlobIndexInBounds(); err != nil {
return vb, err
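The design worth noting in the moved code: checks that can be done once for the whole batch (KZG proofs, the block's proposer signature) are performed up front, and each per-blob verifier is then told those requirements are already satisfied via SatisfyRequirement, so they are never recomputed per sidecar. A minimal sketch of that "verify once, mark satisfied per item" pattern, with invented types for illustration:

package main

import (
	"errors"
	"fmt"
)

type Requirement string

const (
	ReqKzgVerified   Requirement = "kzg"
	ReqIndexInBounds Requirement = "index-in-bounds"
)

type Blob struct{ Index int }

// verifier tracks which requirements have been satisfied for one blob.
type verifier struct {
	blob      Blob
	satisfied map[Requirement]bool
}

func (v *verifier) SatisfyRequirement(r Requirement) { v.satisfied[r] = true }

func (v *verifier) IndexInBounds() error {
	if v.blob.Index >= 6 { // hypothetical max blobs per block
		return errors.New("index out of bounds")
	}
	v.SatisfyRequirement(ReqIndexInBounds)
	return nil
}

func verifyBatch(blobs []Blob) error {
	// Step 1: one batch-wide KZG verification instead of N individual ones
	// (stubbed as always-succeeding here).
	// Step 2: per-blob verifiers inherit that result instead of recomputing it.
	for _, b := range blobs {
		v := &verifier{blob: b, satisfied: map[Requirement]bool{}}
		v.SatisfyRequirement(ReqKzgVerified) // already proven for the whole batch
		if err := v.IndexInBounds(); err != nil {
			return err
		}
		fmt.Printf("blob %d: satisfied %d requirements\n", b.Index, len(v.satisfied))
	}
	return nil
}

func main() {
	if err := verifyBatch([]Blob{{0}, {1}}); err != nil {
		panic(err)
	}
}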
beacon-chain/verification/batch_test.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package verification

import (
"context"
"testing"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/stretchr/testify/require"
)

func TestBatchVerifier(t *testing.T) {
ctx := context.Background()
mockCV := func(err error) roblobCommitmentVerifier {
return func(...blocks.ROBlob) error {
return err
}
}
var invCmtErr = errors.New("mock invalid commitment")
type vbcbt func() (blocks.VerifiedROBlob, error)
vbcb := func(bl blocks.ROBlob, err error) vbcbt {
return func() (blocks.VerifiedROBlob, error) {
return blocks.VerifiedROBlob{ROBlob: bl}, err
}
}
cases := []struct {
name string
nv func() NewBlobVerifier
cv roblobCommitmentVerifier
bandb func(t *testing.T, n int) (blocks.ROBlock, []blocks.ROBlob)
err error
nblobs int
reqs []Requirement
}{
{
name: "no blobs",
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
nblobs: 0,
},
{
name: "happy path",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 3,
},
{
name: "partial batch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
// Add extra blobs to the block that we won't return
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb+3)
return blk, blbs[0:3]
},
nblobs: 3,
},
{
name: "invalid commitment",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
cv: mockCV(invCmtErr),
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
err: invCmtErr,
nblobs: 1,
},
{
name: "signature mismatch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
blbs[0].SignedBlockHeader.Signature = []byte("wrong")
return blk, blbs
},
err: ErrBatchSignatureMismatch,
nblobs: 2,
},
{
name: "root mismatch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
wr, err := blocks.NewROBlobWithRoot(blbs[0].BlobSidecar, bytesutil.ToBytes32([]byte("wrong")))
require.NoError(t, err)
blbs[0] = wr
return blk, blbs
},
err: ErrBatchBlockRootMismatch,
nblobs: 1,
},
{
name: "idx oob",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{
ErrBlobIndexInBounds: ErrBlobIndexInvalid,
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 1,
err: ErrBlobIndexInvalid,
},
{
name: "inclusion proof invalid",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{
ErrSidecarInclusionProven: ErrSidecarInclusionProofInvalid,
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 1,
err: ErrSidecarInclusionProofInvalid,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
blk, blbs := c.bandb(t, c.nblobs)
reqs := c.reqs
if reqs == nil {
reqs = InitsyncSidecarRequirements
}
bbv := NewBlobBatchVerifier(c.nv(), reqs)
if c.cv == nil {
bbv.verifyKzg = mockCV(nil)
} else {
bbv.verifyKzg = c.cv
}
vb, err := bbv.VerifiedROBlobs(ctx, blk, blbs)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.nblobs, len(vb))
})
}
}
@@ -70,6 +70,9 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()

+ // PendingQueueSidecarRequirements is the same as InitsyncSidecarRequirements, used by the pending blocks queue.
+ var PendingQueueSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
+
var (
ErrBlobInvalid = errors.New("blob failed verification")
// ErrBlobIndexInvalid means RequireBlobIndexInBounds failed.
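The derivation idiom above, requirementList(base).excluding(...), builds each context's requirement set by copying a base list and dropping entries, so .excluding() with no arguments reads as "same as the base, but an independent copy". A toy sketch of that idiom; the names and the exclusion chosen here are invented for illustration, not the real package internals:

package main

import "fmt"

type Requirement string

type requirementList []Requirement

// excluding returns a copy of the list without the given requirements.
// Called with no arguments it simply clones the base list, which is how a
// derived set documents "same as the base" while staying independent of it.
func (l requirementList) excluding(drop ...Requirement) []Requirement {
	dropped := make(map[Requirement]bool, len(drop))
	for _, d := range drop {
		dropped[d] = true
	}
	out := make([]Requirement, 0, len(l))
	for _, r := range l {
		if !dropped[r] {
			out = append(out, r)
		}
	}
	return out
}

var gossipRequirements = []Requirement{"index-in-bounds", "not-from-future-slot", "valid-proposer-signature"}

// Hypothetical derivation: init-sync drops a gossip-only timing check; the
// pending queue reuses the init-sync set unchanged.
var initsyncRequirements = requirementList(gossipRequirements).excluding("not-from-future-slot")
var pendingQueueRequirements = requirementList(initsyncRequirements).excluding()

func main() {
	fmt.Println(initsyncRequirements)     // [index-in-bounds valid-proposer-signature]
	fmt.Println(pendingQueueRequirements) // same entries, independent copy
}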
@@ -190,12 +193,15 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
// First check if there is a cached verification that can be reused.
seen, err := bv.sc.SignatureVerified(sd)
if seen {
+ blobVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
if err != nil {
log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache")
+ blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
return ErrInvalidProposerSignature
}
return nil
}
+ blobVerificationProposerSignatureCache.WithLabelValues("miss").Inc()

// Retrieve the parent state to fallback to full verification.
parent, err := bv.parentState(ctx)
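One observation on the instrumentation above: a single CounterVec with a result label covers all three cache outcomes, and a cached negative entry short-circuits to an error without redoing the signature check. A self-contained sketch of that accounting pattern; the cache and verifier are stand-ins (and this sketch orders the counters so each lookup bumps exactly one label), while the prometheus calls themselves are the real client API:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var sigCacheResults = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_signature_cache_results",
		Help: "Signature cache lookup outcomes.",
	},
	[]string{"result"},
)

var errInvalidSignature = errors.New("invalid signature")

// cache maps a signature key to the error from its last full verification
// (nil for a valid signature).
type cache map[string]error

func verifySignature(c cache, key string, fullVerify func() error) error {
	if cachedErr, seen := c[key]; seen {
		if cachedErr != nil {
			sigCacheResults.WithLabelValues("hit-invalid").Inc()
			return errInvalidSignature
		}
		sigCacheResults.WithLabelValues("hit-valid").Inc()
		return nil
	}
	sigCacheResults.WithLabelValues("miss").Inc()
	err := fullVerify()
	c[key] = err // remember both success and failure
	return err
}

func main() {
	c := cache{}
	_ = verifySignature(c, "sig-a", func() error { return nil }) // miss, then cached
	_ = verifySignature(c, "sig-a", func() error { return nil }) // hit-valid
	fmt.Println("done")
}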
beacon-chain/verification/metrics.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package verification

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
blobVerificationProposerSignatureCache = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "blob_verification_proposer_signature_cache",
Help: "BlobSidecar proposer signature cache result.",
},
[]string{"result"},
)
)
Some files were not shown because too many files have changed in this diff.