Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00.

Compare commits: v5.0.1-rc. ... new-cache- (68 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 214b17e3b8 | |
| | bbb4bc1d2c | |
| | f343333880 | |
| | 8e0b1b7e1f | |
| | 65f71b3a48 | |
| | 9fcb9b86af | |
| | aa63c4e7f2 | |
| | d6ae838bbf | |
| | d49afb370c | |
| | 4d3a6d84d2 | |
| | 9c5d16e161 | |
| | 4731304187 | |
| | 02cbcf8545 | |
| | 4e10734ae4 | |
| | e19c99c3e2 | |
| | 697bcd418c | |
| | ec7949fa4b | |
| | cb8eb4e955 | |
| | 800f3b572f | |
| | 9d3af41acb | |
| | 07a0a95ee7 | |
| | 9e7352704c | |
| | 2616de1eb1 | |
| | b2e3c29ab3 | |
| | 83538251aa | |
| | 2442280e37 | |
| | 4608569495 | |
| | 20d013a30b | |
| | b0a2115a26 | |
| | 102518e106 | |
| | e49ed4d554 | |
| | 21775eed52 | |
| | ee9274a9bc | |
| | ef21d3adf8 | |
| | b6ce6c2eba | |
| | b3caaa9acc | |
| | d6fb8c29c9 | |
| | 3df7a1f067 | |
| | 4c3dbae3c0 | |
| | 68b78dd520 | |
| | 2e2ef4a179 | |
| | b61d17731e | |
| | 6d3c6a6331 | |
| | f1615c4c88 | |
| | 87b127365f | |
| | 5215ed03fd | |
| | 0453d18395 | |
| | 0132c1b17d | |
| | d9d2ee75de | |
| | ddb321e0ce | |
| | 5735379963 | |
| | 1d5a09c05d | |
| | 70e1b11aeb | |
| | e100fb0c08 | |
| | 789c3f8078 | |
| | 0b261cba5e | |
| | 7a9608ea20 | |
| | f795e09ecf | |
| | e6a6365bdd | |
| | 4c66e4d060 | |
| | daad29d0de | |
| | 9f67ad9496 | |
| | 0ee0653a15 | |
| | 4ff91bebf8 | |
| | f85e027141 | |
| | e09ae75c9f | |
| | cb80d5ad32 | |
| | 24b029bbef | |
.bazelrc (6 lines changed)

@@ -6,6 +6,12 @@ import %workspace%/build/bazelrc/debug.bazelrc
 import %workspace%/build/bazelrc/hermetic-cc.bazelrc
 import %workspace%/build/bazelrc/performance.bazelrc
 
+# hermetic_cc_toolchain v3.0.1 required changes.
+common --enable_platform_specific_config
+build:linux --sandbox_add_mount_pair=/tmp
+build:macos --sandbox_add_mount_pair=/var/tmp
+build:windows --sandbox_add_mount_pair=C:\Temp
+
 # E2E run with debug gotag
 test:e2e --define gotags=debug
 

@@ -1 +1 @@
-7.0.0
+7.1.0

MODULE.bazel.lock (generated, 821 lines changed)

File diff suppressed because it is too large.
WORKSPACE (37 lines changed)

@@ -16,12 +16,14 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
 
 rules_pkg_dependencies()
 
+HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
+
 http_archive(
     name = "hermetic_cc_toolchain",
-    sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
-    strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
+    sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
     urls = [
-        "https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz",  # 2023-06-28
+        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
+        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
     ],
 )

@@ -81,10 +83,10 @@ bazel_skylib_workspace()
 
 http_archive(
     name = "bazel_gazelle",
-    sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
+    integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
+        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
     ],
 )

@@ -113,6 +115,13 @@ http_archive(
     url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz",  # 2024-01-24
 )
 
+http_archive(
+    name = "aspect_bazel_lib",
+    sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
+    strip_prefix = "bazel-lib-2.5.0",
+    url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
+)
+
 load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
 
 aspect_bazel_lib_dependencies()

@@ -144,17 +153,13 @@ http_archive(
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
+    sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
     ],
 )
 
-load("//:distroless_deps.bzl", "distroless_deps")
-
-distroless_deps()
-
 # Override default import in rules_go with special patch until
 # https://github.com/gogo/protobuf/pull/582 is merged.
 git_repository(

@@ -193,10 +198,14 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.21.6",
+    go_version = "1.21.8",
     nogo = "@//:nogo",
 )
 
+load("//:distroless_deps.bzl", "distroless_deps")
+
+distroless_deps()
+
 http_archive(
     name = "io_kubernetes_build",
     sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -6,11 +6,14 @@ go_library(
         "checkpoint.go",
         "client.go",
         "doc.go",
+        "health.go",
+        "log.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
     visibility = ["//visibility:public"],
     deps = [
         "//api/client:go_default_library",
+        "//api/client/beacon/iface:go_default_library",
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",

@@ -36,10 +39,12 @@ go_test(
     srcs = [
         "checkpoint_test.go",
         "client_test.go",
+        "health_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
         "//api/client:go_default_library",
+        "//api/client/beacon/testing:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",

@@ -53,5 +58,6 @@ go_test(
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@org_uber_go_mock//gomock:go_default_library",
     ],
 )
@@ -17,7 +17,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/io/file"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/mod/semver"
 )

@@ -74,7 +74,12 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
 	if err != nil {
 		return nil, errors.Wrap(err, "error detecting chain config for finalized state")
 	}
-	log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
+
+	log.WithFields(logrus.Fields{
+		"name": vu.Config.ConfigName,
+		"fork": version.String(vu.Fork),
+	}).Info("Detected supported config in remote finalized state")
+
 	s, err := vu.UnmarshalBeaconState(sb)
 	if err != nil {
 		return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")

@@ -108,10 +113,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
 	}
 
 	log.
-		WithField("block_slot", b.Block().Slot()).
-		WithField("state_slot", s.Slot()).
-		WithField("state_root", hexutil.Encode(sr[:])).
-		WithField("block_root", hexutil.Encode(br[:])).
+		WithField("blockSlot", b.Block().Slot()).
+		WithField("stateSlot", s.Slot()).
+		WithField("stateRoot", hexutil.Encode(sr[:])).
+		WithField("blockRoot", hexutil.Encode(br[:])).
 		Info("Downloaded checkpoint sync state and block.")
 	return &OriginData{
 		st: s,
@@ -22,7 +22,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/network/forks"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 )
 
 const (

@@ -309,9 +309,9 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
 	}
 	for _, failure := range errorJson.Failures {
 		w := request[failure.Index].Message
-		log.WithFields(log.Fields{
-			"validator_index":    w.ValidatorIndex,
-			"withdrawal_address": w.ToExecutionAddress,
+		log.WithFields(logrus.Fields{
+			"validatorIndex":    w.ValidatorIndex,
+			"withdrawalAddress": w.ToExecutionAddress,
 		}).Error(failure.Message)
 	}
 	return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)

@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
 func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
 	ofs := make(forks.OrderedSchedule, 0)
 	for _, d := range fsr.Data {
-		epoch, err := strconv.Atoi(d.Epoch)
+		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
 		}
 		vSlice, err := hexutil.Decode(d.CurrentVersion)
 		if err != nil {

@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
 		version := bytesutil.ToBytes4(vSlice)
 		ofs = append(ofs, forks.ForkScheduleEntry{
 			Version: version,
-			Epoch:   primitives.Epoch(uint64(epoch)),
+			Epoch:   primitives.Epoch(epoch),
 		})
 	}
 	sort.Sort(ofs)
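The `OrderedForkSchedule` hunk above swaps `strconv.Atoi` for `strconv.ParseUint`. A standalone sketch (not part of the diff) of why that matters for epoch strings: `Atoi` accepts negative values and its range depends on the platform word size, while `ParseUint(s, 10, 64)` covers the full epoch range and rejects malformed input. The sample strings below are hypothetical:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical epoch strings as a fork-schedule response might carry them.
	for _, s := range []string{"194048", "-1", "18446744073709551615"} {
		// strconv.Atoi("-1") would succeed and, on 32-bit platforms,
		// large epochs would overflow; ParseUint rejects both cases.
		epoch, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			fmt.Printf("reject %q: %v\n", s, err)
			continue
		}
		fmt.Printf("epoch %d\n", epoch)
	}
}
```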
api/client/beacon/health.go (new file, 55 lines)

@@ -0,0 +1,55 @@
+package beacon
+
+import (
+	"context"
+	"sync"
+
+	"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
+)
+
+type NodeHealthTracker struct {
+	isHealthy  *bool
+	healthChan chan bool
+	node       iface.HealthNode
+	sync.RWMutex
+}
+
+func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
+	return &NodeHealthTracker{
+		node:       node,
+		healthChan: make(chan bool, 1),
+	}
+}
+
+// HealthUpdates provides a read-only channel for health updates.
+func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
+	return n.healthChan
+}
+
+func (n *NodeHealthTracker) IsHealthy() bool {
+	n.RLock()
+	defer n.RUnlock()
+	if n.isHealthy == nil {
+		return false
+	}
+	return *n.isHealthy
+}
+
+func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
+	n.RLock()
+	newStatus := n.node.IsHealthy(ctx)
+	if n.isHealthy == nil {
+		n.isHealthy = &newStatus
+	}
+	isStatusChanged := newStatus != *n.isHealthy
+	n.RUnlock()
+
+	if isStatusChanged {
+		n.Lock()
+		// Double-check the condition to ensure it hasn't changed since the first check.
+		n.isHealthy = &newStatus
+		n.Unlock() // It's better to unlock as soon as the protected section is over.
+		n.healthChan <- newStatus
+	}
+	return newStatus
+}
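A minimal usage sketch for the new `NodeHealthTracker` (not from the diff): `fakeNode` and the 12-second polling cadence are illustrative assumptions; any type satisfying `iface.HealthNode` works. Note that `CheckHealth` only pushes to `HealthUpdates()` when the status actually flips, so the consumer sees transitions rather than every poll:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

// fakeNode is a hypothetical stand-in for anything implementing
// iface.HealthNode, e.g. a wrapper around a gRPC or REST connection.
type fakeNode struct{}

func (fakeNode) IsHealthy(_ context.Context) bool { return true }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	tracker := beacon.NewNodeHealthTracker(fakeNode{})

	// Consume transitions; the tracker sends only on state changes.
	go func() {
		for healthy := range tracker.HealthUpdates() {
			log.Printf("node health changed: healthy=%v", healthy)
		}
	}()

	ticker := time.NewTicker(12 * time.Second) // assumed poll interval
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			tracker.CheckHealth(ctx)
		}
	}
}
```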
api/client/beacon/health_test.go (new file, 118 lines)

@@ -0,0 +1,118 @@
+package beacon
+
+import (
+	"context"
+	"sync"
+	"testing"
+
+	healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
+	"go.uber.org/mock/gomock"
+)
+
+func TestNodeHealth_IsHealthy(t *testing.T) {
+	tests := []struct {
+		name      string
+		isHealthy bool
+		want      bool
+	}{
+		{"initially healthy", true, true},
+		{"initially unhealthy", false, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			n := &NodeHealthTracker{
+				isHealthy:  &tt.isHealthy,
+				healthChan: make(chan bool, 1),
+			}
+			if got := n.IsHealthy(); got != tt.want {
+				t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
+	tests := []struct {
+		name       string
+		initial    bool // Initial health status
+		newStatus  bool // Status to update to
+		shouldSend bool // Should a message be sent through the channel
+	}{
+		{"healthy to unhealthy", true, false, true},
+		{"unhealthy to healthy", false, true, true},
+		{"remain healthy", true, true, false},
+		{"remain unhealthy", false, false, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			client := healthTesting.NewMockHealthClient(ctrl)
+			client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
+			n := &NodeHealthTracker{
+				isHealthy:  &tt.initial,
+				node:       client,
+				healthChan: make(chan bool, 1),
+			}
+
+			s := n.CheckHealth(context.Background())
+			// Check if health status was updated
+			if s != tt.newStatus {
+				t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
+			}
+
+			select {
+			case status := <-n.HealthUpdates():
+				if !tt.shouldSend {
+					t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
+				} else if status != tt.newStatus {
+					t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
+				}
+			default:
+				if tt.shouldSend {
+					t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
+				}
+			}
+		})
+	}
+}
+
+func TestNodeHealth_Concurrency(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	client := healthTesting.NewMockHealthClient(ctrl)
+	n := NewNodeHealthTracker(client)
+	var wg sync.WaitGroup
+
+	// Number of goroutines to spawn for both reading and writing
+	numGoroutines := 6
+
+	go func() {
+		for range n.HealthUpdates() {
+			// Consume values to avoid blocking on channel send.
+		}
+	}()
+
+	wg.Add(numGoroutines * 2) // for readers and writers
+
+	// Concurrently update health status
+	for i := 0; i < numGoroutines; i++ {
+		go func() {
+			defer wg.Done()
+			client.EXPECT().IsHealthy(gomock.Any()).Return(false)
+			n.CheckHealth(context.Background())
+			client.EXPECT().IsHealthy(gomock.Any()).Return(true)
+			n.CheckHealth(context.Background())
+		}()
+	}
+
+	// Concurrently read health status
+	for i := 0; i < numGoroutines; i++ {
+		go func() {
+			defer wg.Done()
+			_ = n.IsHealthy() // Just read the value
+		}()
+	}
+
+	wg.Wait() // Wait for all goroutines to finish
+}
api/client/beacon/iface/BUILD.bazel (new file, 8 lines)

@@ -0,0 +1,8 @@
+load("@prysm//tools/go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["health.go"],
+    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
+    visibility = ["//visibility:public"],
+)
api/client/beacon/iface/health.go (new file, 13 lines)

@@ -0,0 +1,13 @@
+package iface
+
+import "context"
+
+type HealthTracker interface {
+	HealthUpdates() <-chan bool
+	IsHealthy() bool
+	CheckHealth(ctx context.Context) bool
+}
+
+type HealthNode interface {
+	IsHealthy(ctx context.Context) bool
+}
api/client/beacon/log.go (new file, 5 lines)

@@ -0,0 +1,5 @@
+package beacon
+
+import "github.com/sirupsen/logrus"
+
+var log = logrus.WithField("prefix", "beacon")
api/client/beacon/testing/BUILD.bazel (new file, 12 lines)

@@ -0,0 +1,12 @@
+load("@prysm//tools/go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["mock.go"],
+    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//api/client/beacon/iface:go_default_library",
+        "@org_uber_go_mock//gomock:go_default_library",
+    ],
+)
api/client/beacon/testing/mock.go (new file, 53 lines)

@@ -0,0 +1,53 @@
+package testing
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
+	"go.uber.org/mock/gomock"
+)
+
+var (
+	_ = iface.HealthNode(&MockHealthClient{})
+)
+
+// MockHealthClient is a mock of HealthClient interface.
+type MockHealthClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockHealthClientMockRecorder
+}
+
+// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
+type MockHealthClientMockRecorder struct {
+	mock *MockHealthClient
+}
+
+// IsHealthy mocks base method.
+func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "IsHealthy", arg0)
+	ret0, ok := ret[0].(bool)
+	if !ok {
+		return false
+	}
+	return ret0
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
+	return m.recorder
+}
+
+// IsHealthy indicates an expected call of IsHealthy.
+func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
+}
+
+// NewMockHealthClient creates a new mock instance.
+func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
+	mock := &MockHealthClient{ctrl: ctrl}
+	mock.recorder = &MockHealthClientMockRecorder{mock}
+	return mock
+}
@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	b := bytes.NewBuffer(nil)
 	if r.Body == nil {
 		log.WithFields(log.Fields{
-			"body-base64": "(nil value)",
-			"url":         r.URL.String(),
+			"bodyBase64": "(nil value)",
+			"url":        r.URL.String(),
 		}).Info("builder http request")
 		return nil
 	}

@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	}
 	r.Body = io.NopCloser(b)
 	log.WithFields(log.Fields{
-		"body-base64": string(body),
-		"url":         r.URL.String(),
+		"bodyBase64": string(body),
+		"url":        r.URL.String(),
 	}).Info("builder http request")
 
 	return nil

@@ -21,6 +21,9 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
 // ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
 var ErrInvalidNodeVersion = errors.New("invalid node version response")
 
+// ErrConnectionIssue represents a connection problem.
+var ErrConnectionIssue = errors.New("could not connect")
+
 // Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
 func Non200Err(response *http.Response) error {
 	bodyBytes, err := io.ReadAll(response.Body)
api/client/event/BUILD.bazel (new file, 24 lines)

@@ -0,0 +1,24 @@
+load("@prysm//tools/go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["event_stream.go"],
+    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//api:go_default_library",
+        "//api/client:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["event_stream_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//testing/require:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
+)
api/client/event/event_stream.go (new file, 148 lines)

@@ -0,0 +1,148 @@
+package event
+
+import (
+	"bufio"
+	"context"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/api"
+	"github.com/prysmaticlabs/prysm/v5/api/client"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	EventHead                        = "head"
+	EventBlock                       = "block"
+	EventAttestation                 = "attestation"
+	EventVoluntaryExit               = "voluntary_exit"
+	EventBlsToExecutionChange        = "bls_to_execution_change"
+	EventProposerSlashing            = "proposer_slashing"
+	EventAttesterSlashing            = "attester_slashing"
+	EventFinalizedCheckpoint         = "finalized_checkpoint"
+	EventChainReorg                  = "chain_reorg"
+	EventContributionAndProof        = "contribution_and_proof"
+	EventLightClientFinalityUpdate   = "light_client_finality_update"
+	EventLightClientOptimisticUpdate = "light_client_optimistic_update"
+	EventPayloadAttributes           = "payload_attributes"
+	EventBlobSidecar                 = "blob_sidecar"
+	EventError                       = "error"
+	EventConnectionError             = "connection_error"
+)
+
+var (
+	_ = EventStreamClient(&EventStream{})
+)
+
+var DefaultEventTopics = []string{EventHead}
+
+type EventStreamClient interface {
+	Subscribe(eventsChannel chan<- *Event)
+}
+
+type Event struct {
+	EventType string
+	Data      []byte
+}
+
+// EventStream is responsible for subscribing to the Beacon API events endpoint
+// and dispatching received events to subscribers.
+type EventStream struct {
+	ctx        context.Context
+	httpClient *http.Client
+	host       string
+	topics     []string
+}
+
+func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
+	// Check if the host is a valid URL
+	_, err := url.ParseRequestURI(host)
+	if err != nil {
+		return nil, err
+	}
+	if len(topics) == 0 {
+		return nil, errors.New("no topics provided")
+	}
+
+	return &EventStream{
+		ctx:        ctx,
+		httpClient: httpClient,
+		host:       host,
+		topics:     topics,
+	}, nil
+}
+
+func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
+	allTopics := strings.Join(h.topics, ",")
+	log.WithField("topics", allTopics).Info("Listening to Beacon API events")
+	fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
+	req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
+	if err != nil {
+		eventsChannel <- &Event{
+			EventType: EventConnectionError,
+			Data:      []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
+		}
+	}
+	req.Header.Set("Accept", api.EventStreamMediaType)
+	req.Header.Set("Connection", api.KeepAlive)
+	resp, err := h.httpClient.Do(req)
+	if err != nil {
+		eventsChannel <- &Event{
+			EventType: EventConnectionError,
+			Data:      []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
+		}
+	}
+
+	defer func() {
+		if closeErr := resp.Body.Close(); closeErr != nil {
+			log.WithError(closeErr).Error("Failed to close events response body")
+		}
+	}()
+	// Create a new scanner to read lines from the response body
+	scanner := bufio.NewScanner(resp.Body)
+
+	var eventType, data string // Variables to store event type and data
+
+	// Iterate over lines of the event stream
+	for scanner.Scan() {
+		select {
+		case <-h.ctx.Done():
+			log.Info("Context canceled, stopping event stream")
+			close(eventsChannel)
+			return
+		default:
+			line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
+			// Handle the event based on your specific format
+			if line == "" {
+				// Empty line indicates the end of an event
+				if eventType != "" && data != "" {
+					// Process the event when both eventType and data are set
+					eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
+				}
+
+				// Reset eventType and data for the next event
+				eventType, data = "", ""
+				continue
+			}
+			et, ok := strings.CutPrefix(line, "event: ")
+			if ok {
+				// Extract event type from the "event" field
+				eventType = et
+			}
+			d, ok := strings.CutPrefix(line, "data: ")
+			if ok {
+				// Extract data from the "data" field
+				data = d
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		eventsChannel <- &Event{
+			EventType: EventConnectionError,
+			Data:      []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
+		}
+	}
+}
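A hedged usage sketch for the new `EventStream` client (not part of the diff): the node address and topic choice below are assumptions. `Subscribe` blocks, so it is typically run in a goroutine, and connection problems surface on the channel as `EventConnectionError` events rather than as returned errors:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v5/api/client/event"
)

func main() {
	ctx := context.Background()
	// Assumes a beacon node serving /eth/v1/events at this address.
	stream, err := event.NewEventStream(ctx, http.DefaultClient,
		"http://localhost:3500", []string{event.EventHead, event.EventFinalizedCheckpoint})
	if err != nil {
		panic(err)
	}

	ch := make(chan *event.Event, 1)
	go stream.Subscribe(ch) // blocking; runs until ctx is done or the stream ends

	for ev := range ch {
		if ev.EventType == event.EventConnectionError {
			fmt.Println("connection error:", string(ev.Data))
			continue
		}
		fmt.Printf("%s: %s\n", ev.EventType, ev.Data)
	}
}
```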
api/client/event/event_stream_test.go (new file, 80 lines)

@@ -0,0 +1,80 @@
+package event
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/prysmaticlabs/prysm/v5/testing/require"
+	log "github.com/sirupsen/logrus"
+)
+
+func TestNewEventStream(t *testing.T) {
+	validURL := "http://localhost:8080"
+	invalidURL := "://invalid"
+	topics := []string{"topic1", "topic2"}
+
+	tests := []struct {
+		name    string
+		host    string
+		topics  []string
+		wantErr bool
+	}{
+		{"Valid input", validURL, topics, false},
+		{"Invalid URL", invalidURL, topics, true},
+		{"No topics", validURL, []string{}, true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestEventStream(t *testing.T) {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
+		flusher, ok := w.(http.Flusher)
+		require.Equal(t, true, ok)
+		for i := 1; i <= 2; i++ {
+			_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
+			require.NoError(t, err)
+			flusher.Flush()                    // Trigger flush to simulate streaming data
+			time.Sleep(100 * time.Millisecond) // Simulate delay between events
+		}
+	})
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	topics := []string{"head"}
+	eventsChannel := make(chan *Event, 1)
+	stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
+	require.NoError(t, err)
+	go stream.Subscribe(eventsChannel)
+
+	// Collect events
+	var events []*Event
+
+	for len(events) != 2 {
+		select {
+		case event := <-eventsChannel:
+			log.Info(event)
+			events = append(events, event)
+		}
+	}
+
+	// Assertions to verify the events content
+	expectedData := []string{"data1", "data2"}
+	for i, event := range events {
+		if string(event.Data) != expectedData[i] {
+			t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
+		}
+	}
+}
@@ -343,35 +343,7 @@ func (s *Service) ForkChoicer() f.ForkChoicer {
 
 // IsOptimistic returns true if the current head is optimistic.
 func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
-	if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
-		return false, nil
-	}
-	s.headLock.RLock()
-	if s.head == nil {
-		s.headLock.RUnlock()
-		return false, ErrNilHead
-	}
-	headRoot := s.head.root
-	headSlot := s.head.slot
-	headOptimistic := s.head.optimistic
-	s.headLock.RUnlock()
-	// we trust the head package for recent head slots, otherwise fallback to forkchoice
-	if headSlot+2 >= s.CurrentSlot() {
-		return headOptimistic, nil
-	}
-
-	s.cfg.ForkChoiceStore.RLock()
-	defer s.cfg.ForkChoiceStore.RUnlock()
-	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
-	if err == nil {
-		return optimistic, nil
-	}
-	if !errors.Is(err, doublylinkedtree.ErrNilNode) {
-		return true, err
-	}
-	// If fockchoice does not have the headroot, then the node is considered
-	// optimistic
-	return true, nil
+	return false, nil
 }
 
 // IsFinalized returns true if the input root is finalized.

@@ -563,3 +535,9 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
 func (s *Service) inRegularSync() bool {
 	return s.cfg.SyncChecker.Synced()
 }
+
+// validating returns true if the beacon is tracking some validators that have
+// registered for proposing.
+func (s *Service) validating() bool {
+	return s.cfg.TrackedValidatorsCache.Validating()
+}
@@ -63,7 +63,7 @@ func TestSaveHead_Different(t *testing.T) {
 	wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
 	newRoot, err := newHeadBlock.HashTreeRoot()
 	require.NoError(t, err)
-	state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
+	state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -238,7 +238,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
 	wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
 	newRoot, err := newHeadBlock.HashTreeRoot()
 	require.NoError(t, err)
-	state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
+	state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -163,7 +163,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
 	parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
 	require.NoError(t, err)
 	require.Equal(t, p, bytesutil.ToBytes32(parentHash))
-	require.Equal(t, td, totalDifficulty.String())
+	require.Equal(t, td, totalDifficulty.Hex())
 
 	_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
 	require.ErrorContains(t, "could not get pow block: block not found", err)
@@ -307,16 +307,16 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
 		return errors.Wrap(err, "could not update proposer index cache")
 	}
-	go func() {
+	go func(ep primitives.Epoch) {
 		// Use a custom deadline here, since this method runs asynchronously.
 		// We ignore the parent method's context and instead create a new one
 		// with a custom deadline, therefore using the background context instead.
 		slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
 		defer cancel()
-		if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
+		if err := helpers.UpdateCommitteeCache(slotCtx, st, ep+1); err != nil {
 			log.WithError(err).Warn("Could not update committee cache")
 		}
-	}()
+	}(e)
 	// The latest block header is from the previous epoch
 	r, err := st.LatestBlockHeader().HashTreeRoot()
 	if err != nil {
@@ -1531,6 +1531,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 // 12 and recover. Notice that it takes two epochs to fully recover, and we stay
 // optimistic for the whole time.
 func TestStore_NoViableHead_Liveness(t *testing.T) {
+	t.Skip("Requires #13664 to be fixed")
 	params.SetupTestConfigCleanup(t)
 	config := params.BeaconConfig()
 	config.SlotsPerEpoch = 6

@@ -2114,7 +2115,7 @@ func TestMissingIndices(t *testing.T) {
 	for _, c := range cases {
 		bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
 		t.Run(c.name, func(t *testing.T) {
-			require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
+			require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
 			missing, err := missingIndices(bs, c.root, c.expected)
 			if c.err != nil {
 				require.ErrorIs(t, err, c.err)
@@ -95,7 +95,9 @@ func (s *Service) spawnProcessAttestationsRoutine() {
 				return
 			case slotInterval := <-ticker.C():
 				if slotInterval.Interval > 0 {
-					s.UpdateHead(s.ctx, slotInterval.Slot+1)
+					if s.validating() {
+						s.UpdateHead(s.ctx, slotInterval.Slot+1)
+					}
 				} else {
 					s.cfg.ForkChoiceStore.Lock()
 					if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
@@ -32,6 +32,9 @@ import (
 // This defines how many epochs since finality the run time will begin to save hot state on to the DB.
 var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
 
+// This defines how many epochs since finality the run time will begin to expand our respective cache sizes.
+var epochsSinceFinalityExpandCache = primitives.Epoch(4)
+
 // BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
 type BlockReceiver interface {
 	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error

@@ -94,6 +97,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	eg, _ := errgroup.WithContext(ctx)
 	var postState state.BeaconState
 	eg.Go(func() error {
+		var err error
 		postState, err = s.validateStateTransition(ctx, preState, blockCopy)
 		if err != nil {
 			return errors.Wrap(err, "failed to validate consensus state transition function")

@@ -102,6 +106,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	})
 	var isValidPayload bool
 	eg.Go(func() error {
+		var err error
 		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
 		if err != nil {
 			return errors.Wrap(err, "could not notify the engine of the new payload")

@@ -188,6 +193,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		return err
 	}
 
+	// We apply the same heuristic to some of our more important caches.
+	if err := s.handleCaches(); err != nil {
+		return err
+	}
+
 	// Reports on block and fork choice metrics.
 	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
 	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}

@@ -361,6 +371,27 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
 	return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
 }
 
+func (s *Service) handleCaches() error {
+	currentEpoch := slots.ToEpoch(s.CurrentSlot())
+	// Prevent `sinceFinality` going underflow.
+	var sinceFinality primitives.Epoch
+	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
+	if finalized == nil {
+		return errNilFinalizedInStore
+	}
+	if currentEpoch > finalized.Epoch {
+		sinceFinality = currentEpoch - finalized.Epoch
+	}
+
+	if sinceFinality >= epochsSinceFinalityExpandCache {
+		helpers.ExpandCommitteeCache()
+		return nil
+	}
+
+	helpers.CompressCommitteeCache()
+	return nil
+}
+
 // This performs the state transition function and returns the poststate or an
 // error if the block fails to verify the consensus rules
 func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
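The new `handleCaches` hook applies a simple hysteresis: once finality is at least `epochsSinceFinalityExpandCache` epochs stale, the committee cache is expanded, and it is compressed again when finality recovers. A self-contained sketch of that decision, using the constants this change introduces (illustrative only, not part of the diff):

```go
package main

import "fmt"

// Constants mirror the values introduced in this change.
const (
	maxCommitteesCacheSize         = 4  // compact steady-state size
	expandedCommitteeCacheSize     = 32 // size while finality is lagging
	epochsSinceFinalityExpandCache = 4  // expansion threshold, in epochs
)

// cacheSizeFor reproduces the handleCaches decision for a pair of epochs.
func cacheSizeFor(currentEpoch, finalizedEpoch uint64) int {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch { // guard against unsigned underflow
		sinceFinality = currentEpoch - finalizedEpoch
	}
	if sinceFinality >= epochsSinceFinalityExpandCache {
		return expandedCommitteeCacheSize
	}
	return maxCommitteesCacheSize
}

func main() {
	fmt.Println(cacheSizeFor(100, 98)) // 4: finality is healthy
	fmt.Println(cacheSizeFor(100, 90)) // 32: long non-finality, expand
}
```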
@@ -308,6 +308,29 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
 	assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
 }
 
+func TestHandleCaches_EnablingLargeSize(t *testing.T) {
+	hook := logTest.NewGlobal()
+	s, _ := minimalTestService(t)
+	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
+	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+
+	require.NoError(t, s.handleCaches())
+	assert.LogsContain(t, hook, "Expanding committee cache size")
+}
+
+func TestHandleCaches_DisablingLargeSize(t *testing.T) {
+	hook := logTest.NewGlobal()
+	s, _ := minimalTestService(t)
+
+	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
+	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+	require.NoError(t, s.handleCaches())
+	s.genesisTime = time.Now()
+
+	require.NoError(t, s.handleCaches())
+	assert.LogsContain(t, hook, "Reducing committee cache size")
+}
+
 func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
 	service, tr := minimalTestService(t)
 	pool := tr.blsPool
@@ -199,6 +199,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 // Start a blockchain service's main event loop.
 func (s *Service) Start() {
 	saved := s.cfg.FinalizedStateAtStartUp
+	defer s.removeStartupState()
 
 	if saved != nil && !saved.IsNil() {
 		if err := s.StartFromSavedState(saved); err != nil {

@@ -289,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
-	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
-		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+	if params.BeaconConfig().ConfigName != params.PraterName {
+		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
+			Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
+			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+		}
+	} else {
+		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
+			Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
+			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+		}
 	}
 
 	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
 		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
 		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

@@ -418,7 +427,7 @@ func (s *Service) startFromExecutionChain() error {
 					log.Error("event data is not type *statefeed.ChainStartedData")
 					return
 				}
-				log.WithField("starttime", data.StartTime).Debug("Received chain start event")
+				log.WithField("startTime", data.StartTime).Debug("Received chain start event")
 				s.onExecutionChainStart(s.ctx, data.StartTime)
 				return
 			}

@@ -550,6 +559,10 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
 	return s.cfg.BeaconDB.HasBlock(ctx, root)
 }
 
+func (s *Service) removeStartupState() {
+	s.cfg.FinalizedStateAtStartUp = nil
+}
+
 func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
 	currentTime := prysmTime.Now()
 	if currentTime.After(genesisTime) {
beacon-chain/cache/committee.go (vendored, 34 lines changed)

@@ -17,12 +17,16 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/container/slice"
 	mathutil "github.com/prysmaticlabs/prysm/v5/math"
+	log "github.com/sirupsen/logrus"
 )
 
 const (
 	// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
 	// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
-	maxCommitteesCacheSize = int(32)
+	maxCommitteesCacheSize = int(4)
+	// expandedCommitteeCacheSize defines the expanded size of the committee cache in the event we
+	// do not have finality to deal with long forks better.
+	expandedCommitteeCacheSize = int(32)
 )
 
 var (

@@ -43,6 +47,7 @@ type CommitteeCache struct {
 	CommitteeCache *lru.Cache
 	lock           sync.RWMutex
 	inProgress     map[string]bool
+	size           int
 }
 
 // committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.

@@ -67,6 +72,33 @@ func (c *CommitteeCache) Clear() {
 	defer c.lock.Unlock()
 	c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
 	c.inProgress = make(map[string]bool)
+	c.size = maxCommitteesCacheSize
 }
 
+// ExpandCommitteeCache expands the size of the committee cache.
+func (c *CommitteeCache) ExpandCommitteeCache() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.size == expandedCommitteeCacheSize {
+		return
+	}
+	c.CommitteeCache.Resize(expandedCommitteeCacheSize)
+	c.size = expandedCommitteeCacheSize
+	log.Warnf("Expanding committee cache size from %d to %d", maxCommitteesCacheSize, expandedCommitteeCacheSize)
+}
+
+// CompressCommitteeCache compresses the size of the committee cache.
+func (c *CommitteeCache) CompressCommitteeCache() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.size == maxCommitteesCacheSize {
+		return
+	}
+	c.CommitteeCache.Resize(maxCommitteesCacheSize)
+	c.size = maxCommitteesCacheSize
+	log.Warnf("Reducing committee cache size from %d to %d", expandedCommitteeCacheSize, maxCommitteesCacheSize)
+}
+
 // Committee fetches the shuffled indices by slot and committee index. Every list of indices
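`ExpandCommitteeCache` and `CompressCommitteeCache` lean on the underlying LRU's `Resize`. Assuming the wrapped cache behaves like hashicorp/golang-lru (the library Prysm's `lruwrpr` wrapper builds on), growing preserves existing entries while shrinking evicts from the least-recently-used end immediately — a small illustration:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	c, err := lru.New(32) // expanded size
	if err != nil {
		panic(err)
	}
	for i := 0; i < 32; i++ {
		c.Add(i, i)
	}
	// Shrinking to the compact size evicts the oldest entries at once.
	evicted := c.Resize(4)
	fmt.Println(evicted, c.Len()) // 28 4
	// Growing back keeps the surviving entries and just raises the cap.
	c.Resize(32)
	fmt.Println(c.Len()) // 4
}
```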
beacon-chain/cache/committee_disabled.go (vendored, 8 lines changed)

@@ -74,3 +74,11 @@ func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
 func (c *FakeCommitteeCache) Clear() {
 	return
 }
+
+func (c *FakeCommitteeCache) ExpandCommitteeCache() {
+	return
+}
+
+func (c *FakeCommitteeCache) CompressCommitteeCache() {
+	return
+}
@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
 	defer span.End()
 	if d == nil {
 		log.WithFields(logrus.Fields{
-			"block":        blockNum,
-			"deposit":      d,
-			"index":        index,
-			"deposit root": hex.EncodeToString(depositRoot[:]),
+			"block":       blockNum,
+			"deposit":     d,
+			"index":       index,
+			"depositRoot": hex.EncodeToString(depositRoot[:]),
 		}).Warn("Ignoring nil deposit insertion")
 		return errors.New("nil deposit inserted into the cache")
 	}

@@ -1189,11 +1189,3 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
 		require.NoError(b, err)
 	}
 }
-
-func emptyEth1data() *ethpb.Eth1Data {
-	return &ethpb.Eth1Data{
-		DepositRoot:  make([]byte, 32),
-		DepositCount: 0,
-		BlockHash:    make([]byte, 32),
-	}
-}
@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
 	}
 	if d == nil {
 		log.WithFields(logrus.Fields{
-			"block":        blockNum,
-			"deposit":      d,
-			"index":        index,
-			"deposit root": hex.EncodeToString(depositRoot[:]),
+			"block":       blockNum,
+			"deposit":     d,
+			"index":       index,
+			"depositRoot": hex.EncodeToString(depositRoot[:]),
 		}).Warn("Ignoring nil deposit insertion")
 		return errors.New("nil deposit inserted into the cache")
 	}

@@ -15,8 +15,6 @@ import (
 )
 
 var (
-	// ErrEmptyExecutionBlock occurs when the execution block is nil.
-	ErrEmptyExecutionBlock = errors.New("empty execution block")
 	// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
 	ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
 	// ErrInvalidDepositCount occurs when the value for mix in length is 0.
beacon-chain/cache/proposer_indices_type.go (vendored, 6 lines changed)

@@ -1,15 +1,9 @@
 package cache
 
 import (
-	"errors"
-
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 )
 
-// ErrNotProposerIndices will be returned when a cache object is not a pointer to
-// a ProposerIndices struct.
-var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
-
 // ProposerIndices defines the cached struct for proposer indices.
 type ProposerIndices struct {
 	BlockRoot [32]byte
beacon-chain/cache/tracked_validators.go (vendored, 6 lines changed)

@@ -41,3 +41,9 @@ func (t *TrackedValidatorsCache) Prune() {
 	defer t.Unlock()
 	t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
 }
+
+func (t *TrackedValidatorsCache) Validating() bool {
+	t.Lock()
+	defer t.Unlock()
+	return len(t.trackedValidators) > 0
+}
@@ -99,7 +99,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
 // VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
 // from the above method by not using fork data from the state and instead retrieving it
 // via the respective epoch.
-func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock) error {
+func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
 	currentEpoch := slots.ToEpoch(blk.Block().Slot())
 	fork, err := forks.Fork(currentEpoch)
 	if err != nil {

@@ -115,7 +115,9 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
 	}
 	proposerPubKey := proposer.PublicKey
 	sig := blk.Signature()
-	return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, blk.Block().HashTreeRoot)
+	return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
+		return blkRoot, nil
+	})
 }
 
 // BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.

@@ -79,11 +79,13 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
 	}
 	domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
 	assert.NoError(t, err)
+	blkRoot, err := altairBlk.Block.HashTreeRoot()
+	assert.NoError(t, err)
 	rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
 	assert.NoError(t, err)
 	sig := keys[0].Sign(rt[:]).Marshal()
 	altairBlk.Signature = sig
 	wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
 	require.NoError(t, err)
-	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
+	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
 }
@@ -14,6 +14,7 @@ go_library(
     visibility = [
         "//beacon-chain:__subpackages__",
        "//testing/spectest:__subpackages__",
+        "//tools:__subpackages__",
     ],
     deps = [
         "//beacon-chain/core/helpers:go_default_library",

@@ -391,6 +391,16 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
 	return nil
 }
 
+// ExpandCommitteeCache resizes the cache to a higher limit.
+func ExpandCommitteeCache() {
+	committeeCache.ExpandCommitteeCache()
+}
+
+// CompressCommitteeCache resizes the cache to a lower limit.
+func CompressCommitteeCache() {
+	committeeCache.CompressCommitteeCache()
+}
+
 // ClearCache clears the beacon committee cache and sync committee cache.
 func ClearCache() {
 	committeeCache.Clear()
@@ -96,6 +96,7 @@ go_test(
         "//testing/benchmark:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
+        "//time/slots:go_default_library",
         "@com_github_google_gofuzz//:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",

@@ -21,6 +21,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
+	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )
 
 func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {

@@ -48,7 +49,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
 
 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)

@@ -135,7 +136,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
 
 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -23,6 +23,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
+	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )
 
 func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {

@@ -50,7 +51,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
 
 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)

@@ -124,7 +125,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
 		DepositRoot: bytesutil.PadTo([]byte{2}, 32),
 		BlockHash:   make([]byte, 32),
 	}
-	require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(params.BeaconConfig().SlotsPerEpoch)))
 	e := beaconState.Eth1Data()
 	e.DepositCount = 100
 	require.NoError(t, beaconState.SetEth1Data(e))

@@ -137,7 +138,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
 
 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -40,7 +40,6 @@ go_test(
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",

@@ -13,7 +13,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"

@@ -114,16 +113,6 @@ func Test_commitmentsToCheck(t *testing.T) {
 	}
 }
 
-func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
-	return nil
-}
-
-type mockDA struct {
-	t   *testing.T
-	scs []blocks.ROBlob
-	err error
-}
-
 func TestLazilyPersistent_Missing(t *testing.T) {
 	ctx := context.Background()
 	store := filesystem.NewEphemeralBlobStorage(t)
@@ -19,9 +19,6 @@ var ErrNotFoundState = kv.ErrNotFoundState
|
||||
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
|
||||
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
|
||||
|
||||
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
|
||||
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
|
||||
|
||||
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
|
||||
// as equivalent to db.ErrNotFound.
|
||||
func IsNotFound(err error) bool {
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"ephemeral.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"pruner.go",
|
||||
],
|
||||
@@ -43,7 +44,6 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -16,12 +16,15 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
var (
|
||||
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
|
||||
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
|
||||
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
|
||||
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
|
||||
errNoBasePath = errors.New("BlobStorage base path not specified in init")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -34,14 +37,26 @@ const (
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
// WithBasePath is a required option that sets the base path of blob storage.
|
||||
func WithBasePath(base string) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
b.base = base
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
|
||||
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
pruner, err := newBlobPruner(b.fs, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.pruner = pruner
|
||||
b.retentionEpochs = e
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSaveFsync is an option that causes Save to call fsync before renaming part files for improved durability.
|
||||
func WithSaveFsync(fsync bool) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
b.fsync = fsync
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -49,30 +64,36 @@ func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
|
||||
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
base = path.Clean(base)
|
||||
if err := file.MkdirAll(base); err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
|
||||
}
|
||||
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
|
||||
b := &BlobStorage{
|
||||
fs: fs,
|
||||
}
|
||||
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
b := &BlobStorage{}
|
||||
for _, o := range opts {
|
||||
if err := o(b); err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
|
||||
return nil, errors.Wrap(err, "failed to create blob storage")
|
||||
}
|
||||
}
|
||||
if b.pruner == nil {
|
||||
log.Warn("Initializing blob filesystem storage with pruning disabled")
|
||||
if b.base == "" {
|
||||
return nil, errNoBasePath
|
||||
}
|
||||
b.base = path.Clean(b.base)
|
||||
if err := file.MkdirAll(b.base); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
|
||||
}
|
||||
b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
|
||||
pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.pruner = pruner
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
|
||||
type BlobStorage struct {
|
||||
fs afero.Fs
|
||||
pruner *blobPruner
|
||||
base string
|
||||
retentionEpochs primitives.Epoch
|
||||
fsync bool
|
||||
fs afero.Fs
|
||||
pruner *blobPruner
|
||||
}
|
||||
|
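The hunk above replaces the positional base-path argument with functional options. A minimal usage sketch, assuming the options are exported from the beacon-chain/db/filesystem package as the diff suggests; the path and retention values here are illustrative, not taken from the changeset:

package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
)

func main() {
	// Each option mutates the *BlobStorage under construction and may fail;
	// NewBlobStorage applies them in order and then validates that a base
	// path was supplied (errNoBasePath otherwise, per the diff).
	bs, err := filesystem.NewBlobStorage(
		filesystem.WithBasePath("/tmp/prysm-blobs"), // required
		filesystem.WithBlobRetentionEpochs(4096),    // optional pruning horizon
		filesystem.WithSaveFsync(true),              // optional durability knob
	)
	if err != nil {
		log.Fatalf("blob storage init failed: %v", err)
	}
	_ = bs
}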
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -83,7 +104,7 @@ func (bs *BlobStorage) WarmCache() {
}
go func() {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache.")
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
}()
}
@@ -98,7 +119,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return err
}
if exists {
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
return nil
}
if bs.pruner != nil {
@@ -111,11 +132,14 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
sidecarData, err := sidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
} else if len(sidecarData) == 0 {
return errSidecarEmptySSZData
}

if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath()
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

partialMoved := false
// Ensure the partial file is deleted.
@@ -126,9 +150,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
// It's expected to error if the save is successful.
err = bs.fs.Remove(partPath)
if err == nil {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"partPath": partPath,
}).Debugf("removed partial file")
}).Debugf("Removed partial file")
}
}()

@@ -138,7 +162,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return errors.Wrap(err, "failed to create partial file")
}

_, err = partialFile.Write(sidecarData)
n, err := partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
@@ -146,11 +170,24 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
}
return errors.Wrap(err, "failed to write to partial file")
}
err = partialFile.Close()
if err != nil {
if bs.fsync {
if err := partialFile.Sync(); err != nil {
return err
}
}

if err := partialFile.Close(); err != nil {
return err
}

if n != len(sidecarData) {
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
}

if n == 0 {
return errEmptyBlobWritten
}

// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
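The save path above follows the classic write-temp, optionally fsync, then atomically rename recipe. A self-contained sketch of that pattern against the plain os package (not Prysm's afero-backed store):

package main

import (
	"fmt"
	"os"
)

// atomicWrite writes data to a temporary "part" file, optionally fsyncs it,
// and renames it into place. Readers never observe a partially written file,
// because the rename is atomic on POSIX filesystems within one mount.
func atomicWrite(path string, data []byte, fsync bool) error {
	part := path + ".part"
	f, err := os.Create(part)
	if err != nil {
		return fmt.Errorf("create part file: %w", err)
	}
	n, err := f.Write(data)
	if err != nil {
		f.Close()
		return fmt.Errorf("write part file: %w", err)
	}
	if n != len(data) {
		f.Close()
		return fmt.Errorf("short write: %d of %d bytes", n, len(data))
	}
	if fsync {
		if err := f.Sync(); err != nil {
			f.Close()
			return err
		}
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(part, path)
}

func main() {
	if err := atomicWrite("/tmp/example.ssz", []byte("payload"), true); err != nil {
		fmt.Println("save failed:", err)
	}
}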
@@ -257,16 +294,12 @@ func (p blobNamer) dir() string {
return rootString(p.root)
}

func (p blobNamer) fname(ext string) string {
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}

func (p blobNamer) partPath() string {
return p.fname(partExt)
func (p blobNamer) partPath(entropy string) string {
return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
}

func (p blobNamer) path() string {
return p.fname(sszExt)
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
}
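Giving each part file a caller-unique prefix is what makes the concurrent-save case safe: two goroutines saving the same blob index no longer race on one shared part-file name before the atomic rename. A minimal sketch of the idea, using the address of the data slice as cheap per-call entropy the way the diff's %p format does (the helper name is hypothetical):

package main

import "fmt"

// partName derives a part-file name that is unique per save attempt, so
// concurrent writers of the same blob index never truncate each other's
// in-progress file. %p on a slice prints the address of its first element,
// which is distinct per allocation.
func partName(index uint64, data []byte) string {
	entropy := fmt.Sprintf("%p", data)
	return fmt.Sprintf("%s-%d.part", entropy, index)
}

func main() {
	a := []byte("sidecar-a")
	b := []byte("sidecar-b")
	fmt.Println(partName(0, a)) // two different names for the same index
	fmt.Println(partName(0, b))
}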

func rootString(root [32]byte) string {

@@ -4,10 +4,9 @@ import (
"bytes"
"os"
"path"
"sync"
"testing"
"time"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -101,32 +100,33 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
_, err = b.Get(blob.BlockRoot(), blob.Index)
require.ErrorIs(t, err, os.ErrNotExist)
})

t.Run("race conditions", func(t *testing.T) {
// There was a bug where saving the same blob in multiple go routines would cause a partial blob
// to be empty. This test ensures that several routines can safely save the same blob at the
// same time. This isn't ideal behavior from the caller, but should be handled safely anyway.
// See https://github.com/prysmaticlabs/prysm/pull/13648
b, err := NewBlobStorage(WithBasePath(t.TempDir()))
require.NoError(t, err)
blob := testSidecars[0]

var wg sync.WaitGroup
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
require.NoError(t, b.Save(blob))
}()
}

wg.Wait()
res, err := b.Get(blob.BlockRoot(), blob.Index)
require.NoError(t, err)
require.DeepSSZEqual(t, blob, res)
})
}

// pollUntil polls a condition function until it returns true or a timeout is reached.
func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
var remainingFolders []os.FileInfo
var err error
// Define the condition function for polling
conditionFunc := func() bool {
remainingFolders, err = afero.ReadDir(fs, ".")
require.NoError(t, err)
return len(remainingFolders) == expected
}

startTime := time.Now()
for {
if conditionFunc() {
break // Condition met, exit the loop
}
if time.Since(startTime) > 30*time.Second {
return errors.New("timeout")
}
time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
}
require.Equal(t, expected, len(remainingFolders))
return nil
}

func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
@@ -243,6 +243,8 @@ func BenchmarkPruning(b *testing.B) {
}

func TestNewBlobStorage(t *testing.T) {
_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
_, err := NewBlobStorage()
require.ErrorIs(t, err, errNoBasePath)
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}

@@ -37,7 +37,7 @@ type BlobMocker struct {

// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
// root and indices to influence the result of Indices().
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error {
for i := range indices {
n := blobNamer{root: root, index: indices[i]}
if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {

5
beacon-chain/db/filesystem/log.go
Normal file
@@ -0,0 +1,5 @@
package filesystem

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "filesystem")
@@ -16,7 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)

@@ -87,7 +87,7 @@ func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
}()
} else {
defer func() {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"upToEpoch": slots.ToEpoch(pruneBefore),
"duration": time.Since(start).String(),
"filesRemoved": totalPruned,

@@ -201,21 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc

// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}

// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[lastIdx] = enc

return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
child := bkt.Get(finalizedChildRoot[:])
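The refactor above hoists the final-element handling out of the loop: every batch entry is pre-encoded, and only the last one gets its ChildRoot filled in from the caller-supplied finalizedChildRoot. Conceptually the index is a linked list keyed by block root. A minimal sketch of the connectivity check the transaction performs, with a map standing in for the bolt bucket (the container field names come from the surrounding diff; everything else is illustrative):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

type finalizedContainer struct {
	ParentRoot []byte
	ChildRoot  []byte
}

var errNotConnected = errors.New("batch does not connect to the finalized child")

// verifyConnects checks that the last block in a backfill batch is the parent
// recorded in the index entry for finalizedChildRoot, mirroring the
// transaction-level validation described in the comments above.
func verifyConnects(index map[[32]byte]finalizedContainer, lastRoot, finalizedChildRoot [32]byte) error {
	child, ok := index[finalizedChildRoot]
	if !ok {
		return fmt.Errorf("finalized child %#x not found", finalizedChildRoot)
	}
	if !bytes.Equal(child.ParentRoot, lastRoot[:]) {
		return errNotConnected
	}
	return nil
}

func main() {
	parent, child := [32]byte{1}, [32]byte{2}
	index := map[[32]byte]finalizedContainer{
		child: {ParentRoot: parent[:], ChildRoot: make([]byte, 32)},
	}
	fmt.Println(verifyConnects(index, parent, child)) // <nil>
}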

@@ -237,6 +237,50 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
return ifaceBlocks
}

func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)

// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]

// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]

// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)

require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))

// make sure that we still correctly validate descendents in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)

// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}

func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -252,23 +296,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)

// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
require.NoError(t, err)

disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))

// use the real root so that it succeeds

@@ -100,37 +100,24 @@ func StoreDatafilePath(dirPath string) string {
}

var Buckets = [][]byte{
attestationsBucket,
blocksBucket,
stateBucket,
proposerSlashingsBucket,
attesterSlashingsBucket,
voluntaryExitsBucket,
chainMetadataBucket,
checkpointBucket,
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,
attestationSourceEpochIndicesBucket,
attestationTargetRootIndicesBucket,
attestationTargetEpochIndicesBucket,
blockSlotIndicesBucket,
stateSlotIndicesBucket,
blockParentRootIndicesBucket,
finalizedBlockRootsIndexBucket,
blockRootValidatorHashesBucket,
// State management service bucket.
newStateServiceCompatibleBucket,
// Migrations
migrationsBucket,

feeRecipientBucket,
registrationBucket,

blobsBucket,
}

// KVStoreOption is a functional option that modifies a kv.Store.
@@ -150,7 +137,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
}
}
datafile := StoreDatafilePath(dirPath)
log.Infof("Opening Bolt DB at %s", datafile)
log.WithField("path", datafile).Info("Opening Bolt DB")
boltDB, err := bolt.Open(
datafile,
params.BeaconIoConfig().ReadWritePermissions,

@@ -7,20 +7,15 @@ package kv
// it easy to scan for keys that have a certain shard number as a prefix and return those
// corresponding attestations.
var (
attestationsBucket = []byte("attestations")
blobsBucket = []byte("blobs")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
proposerSlashingsBucket = []byte("proposer-slashings")
attesterSlashingsBucket = []byte("attester-slashings")
voluntaryExitsBucket = []byte("voluntary-exits")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")

// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -28,16 +23,11 @@ var (
archivedRootBucket = []byte("archived-index-root")

// Key indices buckets.
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
attestationHeadBlockRootBucket = []byte("attestation-head-block-root-indices")
attestationSourceRootIndicesBucket = []byte("attestation-source-root-indices")
attestationSourceEpochIndicesBucket = []byte("attestation-source-epoch-indices")
attestationTargetRootIndicesBucket = []byte("attestation-target-root-indices")
attestationTargetEpochIndicesBucket = []byte("attestation-target-epoch-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")

// Specific item keys.
headBlockRootKey = []byte("head-root")
@@ -69,9 +59,6 @@ var (
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
savedStateSlotsKey = []byte("saved-state-slots")

// New state management service compatibility bucket.
newStateServiceCompatibleBucket = []byte("new-state-compatible")

// Migrations
migrationsBucket = []byte("migrations")
)

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/sirupsen/logrus"
)

// SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader
@@ -27,7 +28,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
}

log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
log.WithFields(logrus.Fields{
"configName": cf.Config.ConfigName,
"forkName": version.String(cf.Fork),
}).Info("Detected supported config for state & block version")

state, err := cf.UnmarshalBeaconState(serState)
if err != nil {
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
@@ -57,13 +62,13 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}

log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}

// save state
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}

@@ -22,7 +22,14 @@ func Restore(cliCtx *cli.Context) error {
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)

restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
restoreFile := path.Join(restoreDir, kv.DatabaseFileName)

dbExists, err := file.Exists(restoreFile, file.Regular)
if err != nil {
return errors.Wrapf(err, "could not check if database exists in %s", restoreFile)
}

if dbExists {
resp, err := prompt.ValidatePrompt(
os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
)
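The restore flow now distinguishes "file is absent" from "could not check": file.Exists takes a file-type argument and returns (bool, error), per the diff. A standalone sketch of an existence check with that contract, built only on the standard library (not Prysm's io/file package):

package main

import (
	"errors"
	"fmt"
	"os"
)

// regularFileExists reports whether p exists and is a regular file,
// returning a non-nil error only when the check itself failed.
func regularFileExists(p string) (bool, error) {
	info, err := os.Stat(p)
	if errors.Is(err, os.ErrNotExist) {
		return false, nil // definitively absent, not an error
	}
	if err != nil {
		return false, err // permission problems, I/O errors, etc.
	}
	return info.Mode().IsRegular(), nil
}

func main() {
	exists, err := regularFileExists("/tmp/beaconchain.db")
	fmt.Println(exists, err)
}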

@@ -48,7 +48,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

@@ -23,6 +23,10 @@ import (
const (
attestationRecordKeySize = 32 // Bytes.
rootSize = 32 // Bytes.

// For database performance reasons, database read/write operations
// are chunked into batches of maximum `batchSize` elements.
batchSize = 10_000
)
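A sketch of the slicing pattern the batch-aware read/write paths below build on: given N items and a batch size B, each database transaction touches at most B records. This is plain Go, independent of bolt; the handler is a stand-in for a per-batch transaction:

package main

import "fmt"

const batchSize = 10_000

// processInBatches walks items in contiguous windows of at most batchSize,
// so each window can be handled in its own bounded transaction.
func processInBatches(items []int, handle func(batch []int) error) error {
	for start := 0; start < len(items); start += batchSize {
		stop := min(start+batchSize, len(items)) // built-in min, Go 1.21+
		if err := handle(items[start:stop]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	items := make([]int, 25_000)
	_ = processInBatches(items, func(batch []int) error {
		fmt.Println("batch of", len(batch)) // 10000, 10000, 5000
		return nil
	})
}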

// LastEpochWrittenForValidators given a list of validator indices returns the latest
@@ -259,14 +263,23 @@ func (s *Store) AttestationRecordForValidator(
// then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attestations []*slashertypes.IndexedAttestationWrapper,
attWrappers []*slashertypes.IndexedAttestationWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))

for i, attestation := range attestations {
attWrappersCount := len(attWrappers)

// If no attestations are provided, skip.
if attWrappersCount == 0 {
return nil
}

// Build encoded target epochs and encoded records
encodedTargetEpoch := make([][]byte, attWrappersCount)
encodedRecords := make([][]byte, attWrappersCount)

for i, attestation := range attWrappers {
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)

value, err := encodeAttestationRecord(attestation)
@@ -278,60 +291,115 @@ func (s *Store) SaveAttestationRecordsForValidators(
encodedRecords[i] = value
}

return s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
// Save attestation records in the database by batch.
for stop := attWrappersCount; stop >= 0; stop -= batchSize {
start := max(0, stop-batchSize)

for i := len(attestations) - 1; i >= 0; i-- {
attestation := attestations[i]
attWrappersBatch := attWrappers[start:stop]
encodedTargetEpochBatch := encodedTargetEpoch[start:stop]
encodedRecordsBatch := encodedRecords[start:stop]

if err := attRecordsBkt.Put(attestation.DataRoot[:], encodedRecords[i]); err != nil {
return err
}

for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))

key := append(encodedTargetEpoch[i], encIdx...)
if err := dataRootsBkt.Put(key, attestation.DataRoot[:]); err != nil {
return err
}
}
// Perform basic check.
if len(encodedTargetEpochBatch) != len(encodedRecordsBatch) {
return fmt.Errorf(
"cannot save attestation records, got %d target epochs and %d records",
len(encodedTargetEpochBatch), len(encodedRecordsBatch),
)
}

return nil
})
currentBatchSize := len(encodedTargetEpochBatch)

// Save attestation records in the database.
if err := s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)

for i := currentBatchSize - 1; i >= 0; i-- {
attWrapper := attWrappersBatch[i]
dataRoot := attWrapper.DataRoot

encodedTargetEpoch := encodedTargetEpochBatch[i]
encodedRecord := encodedRecordsBatch[i]

if err := attRecordsBkt.Put(dataRoot[:], encodedRecord); err != nil {
return err
}

for _, validatorIndex := range attWrapper.IndexedAttestation.AttestingIndices {
encodedIndex := encodeValidatorIndex(primitives.ValidatorIndex(validatorIndex))

key := append(encodedTargetEpoch, encodedIndex...)
if err := dataRootsBkt.Put(key, dataRoot[:]); err != nil {
return err
}
}
}

return nil
}); err != nil {
return errors.Wrap(err, "failed to save attestation records")
}
}

return nil
}

// LoadSlasherChunks given a chunk kind and a disk keys, retrieves chunks for a validator
// min or max span used by slasher from our database.
func (s *Store) LoadSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte,
) ([][]uint16, []bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
defer span.End()
chunks := make([][]uint16, 0)
var exists []bool
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for _, diskKey := range diskKeys {
key := append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), diskKey...)
chunkBytes := bkt.Get(key)
if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue

keysCount := len(chunkKeys)

chunks := make([][]uint16, 0, keysCount)
exists := make([]bool, 0, keysCount)
encodedKeys := make([][]byte, 0, keysCount)

// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))

// Encode keys.
for _, chunkKey := range chunkKeys {
encodedKey := append(encodedKind, chunkKey...)
encodedKeys = append(encodedKeys, encodedKey)
}

// Read chunks from the database by batch.
for start := 0; start < keysCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]

if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)

for _, encodedKey := range encodedKeysBatch {
chunkBytes := bkt.Get(encodedKey)

if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue
}

chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}

chunks = append(chunks, chunk)
exists = append(exists, true)
}
chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}
chunks = append(chunks, chunk)
exists = append(exists, true)

return nil
}); err != nil {
return nil, nil, err
}
return nil
})
return chunks, exists, err
}

return chunks, exists, nil
}

// SaveSlasherChunks given a chunk kind, list of disk keys, and list of chunks,
@@ -341,25 +409,60 @@ func (s *Store) SaveSlasherChunks(
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
defer span.End()
encodedKeys := make([][]byte, len(chunkKeys))
encodedChunks := make([][]byte, len(chunkKeys))
for i := 0; i < len(chunkKeys); i++ {
encodedKeys[i] = append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), chunkKeys[i]...)
encodedChunk, err := encodeSlasherChunk(chunks[i])

// Ensure we have the same number of keys and chunks.
if len(chunkKeys) != len(chunks) {
return fmt.Errorf(
"cannot save slasher chunks, got %d keys and %d chunks",
len(chunkKeys), len(chunks),
)
}

chunksCount := len(chunks)

// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))

// Encode keys and chunks.
encodedKeys := make([][]byte, chunksCount)
encodedChunks := make([][]byte, chunksCount)

for i := 0; i < chunksCount; i++ {
chunkKey, chunk := chunkKeys[i], chunks[i]
encodedKey := append(encodedKind, chunkKey...)

encodedChunk, err := encodeSlasherChunk(chunk)
if err != nil {
return err
return errors.Wrapf(err, "failed to encode slasher chunk for key %v", chunkKey)
}

encodedKeys[i] = encodedKey
encodedChunks[i] = encodedChunk
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for i := 0; i < len(chunkKeys); i++ {
if err := bkt.Put(encodedKeys[i], encodedChunks[i]); err != nil {
return err

// Save chunks in the database by batch.
for start := 0; start < chunksCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]
encodedChunksBatch := encodedChunks[start:stop]
batchSize := len(encodedKeysBatch)

if err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)

for i := 0; i < batchSize; i++ {
if err := bkt.Put(encodedKeysBatch[i], encodedChunksBatch[i]); err != nil {
return err
}
}

return nil
}); err != nil {
return errors.Wrap(err, "failed to save slasher chunks")
}
return nil
})
}

return nil
}

// CheckDoubleBlockProposals takes in a list of proposals and for each,

@@ -14,33 +14,56 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := setupDB(t)
valIdx := primitives.ValidatorIndex(1)
target := primitives.Epoch(5)
source := primitives.Epoch(4)
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
const attestationsCount = 11_000

sr := [32]byte{1}
err = beaconDB.SaveAttestationRecordsForValidators(
ctx,
[]*slashertypes.IndexedAttestationWrapper{
createAttestationWrapper(source, target, []uint64{uint64(valIdx)}, sr[:]),
},
)
// Create context.
ctx := context.Background()

// Create database.
beaconDB := setupDB(t)

// Define the validator index.
validatorIndex := primitives.ValidatorIndex(1)

// Defines attestations to save and retrieve.
attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
for i := 0; i < attestationsCount; i++ {
var dataRoot [32]byte
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))

attWrapper := createAttestationWrapper(
primitives.Epoch(i),
primitives.Epoch(i+1),
[]uint64{uint64(validatorIndex)},
dataRoot[:],
)

attWrappers[i] = attWrapper
}

// Check on a sample of validators that no attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
}

// Save the attestation records to the database.
err := beaconDB.SaveAttestationRecordsForValidators(ctx, attWrappers)
require.NoError(t, err)
attRecord, err = beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
assert.DeepEqual(t, target, attRecord.IndexedAttestation.Data.Target.Epoch)
assert.DeepEqual(t, source, attRecord.IndexedAttestation.Data.Source.Epoch)
assert.DeepEqual(t, sr, attRecord.DataRoot)

// Check on a sample of validators that attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
expected := attWrappers[i]
actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)

require.DeepEqual(t, expected.IndexedAttestation.Data.Source.Epoch, actual.IndexedAttestation.Data.Source.Epoch)
}
}

func TestStore_LastEpochWrittenForValidators(t *testing.T) {
@@ -138,61 +161,113 @@ func TestStore_CheckAttesterDoubleVotes(t *testing.T) {
}

func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
// Define test parameters.
const (
elemsPerChunk = 16
totalChunks = 11_000
)

// Create context.
ctx := context.Background()

// Create database.
beaconDB := setupDB(t)
elemsPerChunk := 16
totalChunks := 64
chunkKeys := make([][]byte, totalChunks)
chunks := make([][]uint16, totalChunks)

// Create min chunk keys and chunks.
minChunkKeys := make([][]byte, totalChunks)
minChunks := make([][]uint16, totalChunks)

for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i))
minChunkKeys[i] = chunkKey

// Create chunk.
chunk := make([]uint16, elemsPerChunk)

for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(0)
chunk[j] = uint16(i + j)
}
chunks[i] = chunk
chunkKeys[i] = ssz.MarshalUint64(make([]byte, 0), uint64(i))

minChunks[i] = chunk
}

// We save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, chunkKeys, chunks)
// Create max chunk keys and chunks.
maxChunkKeys := make([][]byte, totalChunks)
maxChunks := make([][]uint16, totalChunks)

for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i+1))
maxChunkKeys[i] = chunkKey

// Create chunk.
chunk := make([]uint16, elemsPerChunk)

for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(i + j + 1)
}

maxChunks[i] = chunk
}

// Save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, minChunkKeys, minChunks)
require.NoError(t, err)

// We expect no chunks to be stored for max spans.
// Expect no chunks to be stored for max spans.
_, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, chunkKeys,
ctx, slashertypes.MaxSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(chunksExist))
require.Equal(t, len(minChunks), len(chunksExist))

for _, exists := range chunksExist {
require.Equal(t, false, exists)
}

// We check we saved the right chunks.
// Check the right chunks are saved.
retrievedChunks, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, chunkKeys,
ctx, slashertypes.MinSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))

for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, chunks[i], retrievedChunks[i])
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
}

// We save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, chunkKeys, chunks)
// Save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, maxChunkKeys, maxChunks)
require.NoError(t, err)

// We check we saved the right chunks.
// Check right chunks are saved.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, chunkKeys,
ctx, slashertypes.MaxSpan, maxChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))

require.Equal(t, len(maxChunks), len(retrievedChunks))
require.Equal(t, len(maxChunks), len(chunksExist))

for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, chunks[i], retrievedChunks[i])
require.DeepEqual(t, maxChunks[i], retrievedChunks[i])
}

// Check the right chunks are still saved for min span.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))

for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
}
}

@@ -110,7 +110,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return nil, errors.Wrap(errBlockTimeTooLate, fmt.Sprintf("(%d > %d)", time, latestBlkTime))
}
// Initialize a pointer to eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorNum := new(big.Int).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime

var numOfBlocks uint64
@@ -156,15 +156,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
}
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
}
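The big.NewInt(0).SetUint64(x) to new(big.Int).SetUint64(x) substitutions throughout this file are behavior-preserving: both yield a fresh *big.Int, but the second skips the pointless initial store of zero. A short illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	a := big.NewInt(0).SetUint64(42) // allocates, stores 0, then overwrites with 42
	b := new(big.Int).SetUint64(42)  // allocates the zero value and sets 42 directly
	fmt.Println(a.Cmp(b) == 0)       // true: the results are identical
}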

// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
for bn := upperBoundBlk; ; bn = new(big.Int).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -181,7 +181,7 @@ func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big
// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
for bn := lowerBoundBlk; ; bn = new(big.Int).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -201,7 +201,7 @@
}

func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
bn := big.NewInt(0).SetUint64(bNum)
bn := new(big.Int).SetUint64(bNum)
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
if err != nil {
return nil, err

@@ -23,9 +23,6 @@ var (
ErrInvalidPayloadAttributes = errors.New("payload attributes are invalid / inconsistent")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")
// ErrAcceptedSyncingPayloadStatus when the status of the payload is syncing or accepted.
ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
// ErrInvalidPayloadStatus when the status of the payload is invalid.

@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
}

log.WithFields(logrus.Fields{
"ChainStartTime": chainStartTime,
"chainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,
@@ -298,9 +298,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// Start from the deployment block if our last requested block
// is behind it. This is as the deposit logs can only start from the
// block of the deployment of the deposit contract.
if deploymentBlock > currentBlockNum {
currentBlockNum = deploymentBlock
}
currentBlockNum = max(currentBlockNum, deploymentBlock)
// To store all blocks.
headersMap := make(map[uint64]*types.HeaderInfo)
rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
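Several clamp-style if blocks in this file collapse to the min and max builtins that Go 1.21 added for ordered types; the rewrite is mechanical:

package main

import "fmt"

func main() {
	currentBlockNum, deploymentBlock := uint64(10), uint64(25)

	// Before: explicit comparison.
	if deploymentBlock > currentBlockNum {
		currentBlockNum = deploymentBlock
	}

	// After: equivalent built-in (Go 1.21+), as used in the diff.
	currentBlockNum = max(currentBlockNum, deploymentBlock)

	fmt.Println(currentBlockNum) // 25
}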
@@ -384,15 +382,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
end := currentBlockNum + batchSize
// Appropriately bound the request, as we do not
// want request blocks beyond the current follow distance.
if end > latestFollowHeight {
end = latestFollowHeight
}
end = min(end, latestFollowHeight)
query := ethereum.FilterQuery{
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
FromBlock: new(big.Int).SetUint64(start),
ToBlock: new(big.Int).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -400,7 +396,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
query.ToBlock = new(big.Int).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -482,11 +478,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
_, err := s.BlockHashByHeight(ctx, new(big.Int).SetUint64(i))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
err = s.ProcessETH1Block(ctx, new(big.Int).SetUint64(i))
if err != nil {
return err
}

@@ -77,7 +77,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
if currClient != nil {
currClient.Close()
}
log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")

@@ -415,14 +415,11 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
requestRange := (endBlock - startBlock) + 1
elems := make([]gethRPC.BatchElem, 0, requestRange)
headers := make([]*types.HeaderInfo, 0, requestRange)
if requestRange == 0 {
return headers, nil
}
for i := startBlock; i <= endBlock; i++ {
header := &types.HeaderInfo{}
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Args: []interface{}{hexutil.EncodeBig(new(big.Int).SetUint64(i)), false},
Result: header,
Error: error(nil),
})
@@ -583,6 +580,9 @@ func (s *Service) run(done <-chan struct{}) {
s.runError = nil

s.initPOWService()
// Do not keep storing the finalized state as it is
// no longer of use.
s.removeStartupState()

chainstartTicker := time.NewTicker(logPeriod)
defer chainstartTicker.Stop()
@@ -636,7 +636,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}

fields := logrus.Fields{
"Additional validators needed": valNeeded,
"additionalValidatorsNeeded": valNeeded,
}
if secondsLeft > 0 {
fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second
@@ -672,9 +672,7 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
// the allotted limit.
endReq -= 1
}
if endReq > end {
endReq = end
}
endReq = min(endReq, end)
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err := s.batchRequestHeaders(startReq, endReq)
if err != nil {
@@ -910,3 +908,7 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
s.depositTrie = newDepositTrie
return nil
}

func (s *Service) removeStartupState() {
s.cfg.finalizedStateAtStartup = nil
}

@@ -82,6 +82,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return
}

// Return early if we are checking before 10 seconds into the slot
secs, err := slots.SecondsSinceSlotStart(head.slot, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
}
if secs < ProcessAttestationsThreshold {
return true
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
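The new guard keeps ShouldOverrideFCU from making an orphaning decision before enough of the slot has elapsed for attestations to have been processed. A self-contained sketch of the seconds-into-slot arithmetic it relies on; 12-second slots are assumed here for illustration, whereas Prysm reads the value from its config:

package main

import (
	"errors"
	"fmt"
	"time"
)

const secondsPerSlot = 12

// secondsSinceSlotStart returns how far the wall clock is into the given
// slot, erroring if the slot has not started yet.
func secondsSinceSlotStart(slot, genesisTime, now uint64) (uint64, error) {
	slotStart := genesisTime + slot*secondsPerSlot
	if now < slotStart {
		return 0, errors.New("slot has not started yet")
	}
	return now - slotStart, nil
}

func main() {
	genesis := uint64(time.Now().Unix()) - 30 // genesis 30s ago: slot 2 is ~6s old
	secs, err := secondsSinceSlotStart(2, genesis, uint64(time.Now().Unix()))
	fmt.Println(secs, err) // ~6 <nil>: below a 10s threshold, so return early
}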

@@ -85,11 +85,19 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
t.Run("parent is weak early call", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, true, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("parent is weak late call", func(t *testing.T) {
saved := f.store.headNode.parent.weight
driftGenesisTime(f, 2, 11)
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight

@@ -11,7 +11,6 @@ import (

// MuxConfig contains configuration that should be used when registering the beacon node in the gateway.
type MuxConfig struct {
Handler gateway.MuxHandler
EthPbMux *gateway.PbMux
V1AlphaPbMux *gateway.PbMux
}

@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
"validatorIndex": idx,
"slot": data.Slot,
"source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
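The renames here belong to a convention sweep visible throughout the changeset: logrus field keys move to lowerCamelCase while log messages start with a capital letter and drop trailing periods. A minimal sketch of the convention:

package main

import "github.com/sirupsen/logrus"

func main() {
	// Field keys in lowerCamelCase, message in sentence case without a
	// trailing period, matching the style adopted across this diff.
	logrus.WithFields(logrus.Fields{
		"validatorIndex": 2,
		"slot":           1,
	}).Info("Attestation included")
}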
|
||||
|
||||
@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalCorrectTarget++
}
}
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
logFields["correctHead"] = latestPerf.timelyHead
logFields["correctSource"] = latestPerf.timelySource
logFields["correctTarget"] = latestPerf.timelyTarget
logFields["inclusionSlot"] = latestPerf.inclusionSlot
logFields["newBalance"] = balance
logFields["balanceChange"] = balanceChg

s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"aggregatorIndex": att.AggregatorIndex,
"slot": att.Aggregate.Data.Slot,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}

@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {

require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
}

func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
wrappedBlock, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)

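The expected log strings above change order as well as casing: logrus's default text formatter sorts field keys byte-wise, so the uppercase keys used to sort ahead of the lowercase prefix entry, while the camelCase keys now interleave with it alphabetically. A minimal sketch, assuming the default TextFormatter:

package main

import log "github.com/sirupsen/logrus"

func main() {
	// With default key sorting this prints the fields as:
	// prefix=monitor slot=1 validatorIndex=2
	log.WithFields(log.Fields{
		"validatorIndex": 2,
		"slot":           1,
		"prefix":         "monitor",
	}).Info("Attestation included")
}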
@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
}
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b

parentRoot := blk.ParentRoot()
log.WithFields(logrus.Fields{
"ProposerIndex": blk.ProposerIndex(),
"Slot": blk.Slot(),
"Version": blk.Version(),
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"NewBalance": balance,
"BalanceChange": balanceChg,
"proposerIndex": blk.ProposerIndex(),
"slot": blk.Slot(),
"version": blk.Version(),
"parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"newBalance": balance,
"balanceChange": balanceChg,
}).Info("Proposed beacon block was included")
}
}
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ProposerIndex": idx,
"Slot": blk.Slot(),
"SlashingSlot": slashing.Header_1.Header.Slot,
"BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
"proposerIndex": idx,
"slot": blk.Slot(),
"slashingSlot": slashing.Header_1.Header.Slot,
"bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"AttesterIndex": idx,
"BlockInclusionSlot": blk.Slot(),
"AttestationSlot1": slashing.Attestation_1.Data.Slot,
"BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"AttestationSlot2": slashing.Attestation_2.Data.Slot,
"BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
"attesterIndex": idx,
"blockInclusionSlot": blk.Slot(),
"attestationSlot1": slashing.Attestation_1.Data.Slot,
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"attestationSlot2": slashing.Attestation_2.Data.Slot,
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)

log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncCommitteeContributions,
"validatorIndex": idx,
"startEpoch": p.startEpoch,
"startBalance": p.startBalance,
"totalRequested": p.totalRequestedCount,
"attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"totalProposedBlocks": p.totalProposedCount,
"totalAggregations": p.totalAggregations,
"totalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}

@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
},
{
name: "Proposer slashing an untracked index",
@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
"BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
"beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
},
{
name: "Attester slashing untracked index",
@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
Body: &ethpb.BeaconBlockBody{},
},
wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
},
{
name: "Block proposed by untracked validator",
@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
wrapped, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s.processBlock(ctx, wrapped)
@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
}

s.logAggregatedPerformance()
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
"ValidatorIndex=1 prefix=monitor"
wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
" averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
"correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
"startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
"validatorIndex=1"
require.LogsContain(t, hook, wanted)
}

@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
idx := exit.Exit.ValidatorIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"Slot": blk.Slot(),
"validatorIndex": idx,
"slot": blk.Slot(),
}).Info("Voluntary exit was included")
}
}
@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
defer s.RUnlock()
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"validatorIndex": idx,
}).Info("Voluntary exit was processed")
}
}

@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
wb, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processExitsFromBlock(wb)
require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
}

func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
Signature: make([]byte, 96),
}
s.processExit(exit)
require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
}

func TestProcessExitP2PUntrackedIndices(t *testing.T) {

@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf

log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}

@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))

log.WithFields(logrus.Fields{
"ValidatorIndex": validatorIdx,
"ExpectedContribCount": len(committeeIndices),
"ContribCount": contrib,
"NewBalance": balance,
"BalanceChange": balanceChg,
"validatorIndex": validatorIdx,
"expectedContribCount": len(committeeIndices),
"contribCount": contrib,
"newBalance": balance,
"balanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}

@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
}

s.processSyncCommitteeContribution(contrib)
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

func TestProcessSyncAggregate(t *testing.T) {
@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
require.NoError(t, err)

s.processSyncAggregate(beaconState, wrappedBlock)
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

@@ -111,7 +111,7 @@ func (s *Service) Start() {
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })

log.WithFields(logrus.Fields{
"ValidatorIndices": tracked,
"validatorIndices": tracked,
}).Info("Starting service")

go s.run()
@@ -134,7 +134,7 @@ func (s *Service) run() {
}

epoch := slots.ToEpoch(st.Slot())
log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")

s.Lock()
s.initializePerformanceStructures(st, epoch)
@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
log.WithError(err).WithField("validatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
log.WithError(err).WithField("validatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {

@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
s.Lock()
require.Equal(t, s.isLogging, true, "monitor is not running")
s.Unlock()
@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {

// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)

}

@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
c := params.BeaconConfig()
c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
params.OverrideBeaconConfig(c)
}

@@ -7,9 +7,11 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -118,6 +120,7 @@ type BeaconNode struct {
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
@@ -126,51 +129,16 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureTracing(cliCtx); err != nil {
return nil, err
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()

// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()

registry := runtime.NewServiceRegistry()

ctx := cliCtx.Context

beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -202,119 +170,46 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer

beacon.forkChoicer = doublylinkedtree.New()

depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
}

log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
// Allow tests to set it as an opt.
if beacon.BlobStorage == nil {
beacon.BlobStorageOptions = append(beacon.BlobStorageOptions, filesystem.WithSaveFsync(features.Get().BlobSaveFsync))
blobs, err := filesystem.NewBlobStorage(beacon.BlobStorageOptions...)
if err != nil {
return nil, err
}
return nil, err
beacon.BlobStorage = blobs
}

bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}

beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)

pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))

beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)

bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}

log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}

log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}

log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}

log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}

log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}

log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}

log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}

log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}

if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
}

// db.DatabasePath is the path to the containing directory
@@ -326,8 +221,176 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.collector = c

// Do not store the finalized state as it has been provided to the respective services during
// their initialization.
beacon.finalizedStateAtStartUp = nil

return beacon, nil
}

func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}

prereqs.WarnIfPlatformNotSupported(cliCtx.Context)

if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}

if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}

if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}

flags.ConfigureGlobalFlags(cliCtx)

if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}

if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}

if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}

if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}

if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}

configureNetwork(cliCtx)

if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}

if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}

configureFastSSZHashingAlgorithm()

return nil
}

func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}

return bfs, nil
}

func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}

log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}

log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}

log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}

log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}

log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}

log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}

log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}

log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}

log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}

if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}

return nil
}

func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -416,39 +479,85 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("database-path", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false

if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"

deniedText = "Database will not be deleted. No changes have been made."
)

clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
return nil, errors.Wrapf(err, "could not confirm action")
}
}

if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
return nil, errors.Wrap(err, "could not clear database")
}

if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
return nil, errors.Wrap(err, "could not clear blob storage")
}

d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
return nil, errors.Wrap(err, "could not create new database")
}
}

return d, nil
}

func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}

addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}

if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}

return nil
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache

baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
}

if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
}

@@ -458,7 +567,6 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

b.db = d

var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
@@ -473,16 +581,17 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}

return errors.Wrap(err, "could not load genesis from file")
}
}

if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return err
return errors.Wrap(err, "could not ensure embedded genesis")
}

if b.CheckpointInitializer != nil {
@@ -491,23 +600,11 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}

knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())

log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}

@@ -525,7 +622,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("database-path", dbPath).Info("Checking DB")
log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
@@ -595,31 +692,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return err
return errors.Wrapf(err, "could not register p2p service")
}

svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -961,11 +1058,13 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)

selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
@@ -987,7 +1086,6 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
apigateway.WithGatewayAddr(gatewayAddress),
apigateway.WithRemoteAddr(selfAddress),
apigateway.WithPbHandlers(muxs),
apigateway.WithMuxHandler(gatewayConfig.Handler),
apigateway.WithRemoteCert(selfCert),
apigateway.WithMaxCallRecvMsgSize(maxCallSize),
apigateway.WithAllowedOrigins(allowedOrigins),
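The move from fmt.Sprintf("%s:%d", ...) to net.JoinHostPort above is more than style: JoinHostPort brackets IPv6 literals, which the plain format string leaves ambiguous. A minimal sketch:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	addr := fmt.Sprintf("%s:%d", "::1", 4000)
	fmt.Println(addr)                                        // "::1:4000" - ambiguous
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(4000))) // "[::1]:4000"
}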
@@ -1060,9 +1158,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}

opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))

// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())

@@ -43,6 +43,15 @@ func WithBlobStorage(bs *filesystem.BlobStorage) Option {
}
}

// WithBlobStorageOptions appends 1 or more filesystem.BlobStorageOption on the beacon node,
// to be used when initializing blob storage.
func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return func(bn *BeaconNode) error {
bn.BlobStorageOptions = append(bn.BlobStorageOptions, opt...)
return nil
}
}

// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
return func(bn *BeaconNode) error {

@@ -11,6 +11,7 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",

@@ -4,6 +4,7 @@ import (
	"os"
	"path/filepath"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/cmd"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/urfave/cli/v2"
@@ -31,9 +32,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
	if dataDir == "" {
		dataDir = cmd.DefaultDataDir()
		if dataDir == "" {
			log.Fatal(
				"Could not determine your system's HOME path, please specify a --datadir you wish " +
					"to use for your chain data",
			err = errors.Errorf(
				"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
				cmd.DataDirFlag.Name,
			)
		}
	}
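This hunk replaces a log.Fatal inside a helper with a returned error, letting the caller decide how to fail; a fatal exits the whole process, skips deferred cleanup, and cannot be exercised in tests. A minimal sketch of the shape the code moves toward (hypothetical helper, not the Prysm function):

package main

import (
	"errors"
	"fmt"
	"os"
)

// dataDir returns an error instead of calling log.Fatal, so the
// caller controls how (and whether) the process terminates.
func dataDir(home string) (string, error) {
	if home == "" {
		return "", errors.New("could not determine HOME; pass --datadir explicitly")
	}
	return home + "/.node", nil
}

func main() {
	dir, err := dataDir(os.Getenv("HOME"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(dir)
}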
@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
		switch slotInterval.Interval {
		case 0:
			duration := time.Since(t)
			log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
			log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
			batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
		case 1:
			batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
@@ -240,9 +240,8 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
	var hosts []host.Host
	// setup other nodes.
	cfg = &Config{
		BootstrapNodeAddr:   []string{bootNode.String()},
		Discv5BootStrapAddr: []string{bootNode.String()},
		MaxPeers:            30,
		Discv5BootStrapAddrs: []string{bootNode.String()},
		MaxPeers:             30,
	}
	// Setup 2 different hosts
	for i := 1; i <= 2; i++ {
@@ -12,28 +12,27 @@ const defaultPubsubQueueSize = 600

// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
	NoDiscovery         bool
	EnableUPnP          bool
	StaticPeerID        bool
	StaticPeers         []string
	BootstrapNodeAddr   []string
	Discv5BootStrapAddr []string
	RelayNodeAddr       string
	LocalIP             string
	HostAddress         string
	HostDNS             string
	PrivateKey          string
	DataDir             string
	MetaDataDir         string
	TCPPort             uint
	UDPPort             uint
	MaxPeers            uint
	QueueSize           uint
	AllowListCIDR       string
	DenyListCIDR        []string
	StateNotifier       statefeed.Notifier
	DB                  db.ReadOnlyDatabase
	ClockWaiter         startup.ClockWaiter
	NoDiscovery          bool
	EnableUPnP           bool
	StaticPeerID         bool
	StaticPeers          []string
	Discv5BootStrapAddrs []string
	RelayNodeAddr        string
	LocalIP              string
	HostAddress          string
	HostDNS              string
	PrivateKey           string
	DataDir              string
	MetaDataDir          string
	TCPPort              uint
	UDPPort              uint
	MaxPeers             uint
	QueueSize            uint
	AllowListCIDR        string
	DenyListCIDR         []string
	StateNotifier        statefeed.Notifier
	DB                   db.ReadOnlyDatabase
	ClockWaiter          startup.ClockWaiter
}

// validateConfig validates whether the values provided are accurate and will set
@@ -25,7 +25,7 @@ const (
)

// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (_ *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
	return true
}

@@ -63,12 +63,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {

// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (_ *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
func (*Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
	return true
}

// InterceptUpgraded tests whether a fully capable connection is allowed.
func (_ *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
func (*Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
	return true, 0
}
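These hunks drop the blank receiver name: when a method never uses its receiver, `func (*Service) M()` is the idiomatic spelling, and `func (_ *Service) M()` is redundant. A tiny sketch:

package main

import "fmt"

type Service struct{}

// The receiver is unused, so it needs no name at all; the method
// still satisfies any interface that declares Ping() string.
func (*Service) Ping() string { return "pong" }

func main() {
	var s Service
	fmt.Println(s.Ping())
}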
@@ -34,6 +34,11 @@ type Listener interface {
	LocalNode() *enode.LocalNode
}

const (
	udp4 = iota
	udp6
)
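The new udp4/udp6 constants use iota, Go's auto-incrementing constant generator, to replace the "udp4"/"udp6" string comparisons used further down. A sketch of the mechanism:

package main

import "fmt"

// iota assigns each constant in the block a successive integer,
// turning stringly-typed comparisons into cheap int comparisons.
const (
	udp4 = iota // 0
	udp6        // 1
)

func version(isV4 bool) int {
	if isV4 {
		return udp4
	}
	return udp6
}

func main() {
	fmt.Println(version(true), version(false)) // 0 1
}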
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
@@ -62,8 +67,14 @@ func (s *Service) RefreshENR() {
	// Compare current epoch with our fork epochs
	altairForkEpoch := params.BeaconConfig().AltairForkEpoch
	switch {
	// Altair Behaviour
	case currEpoch >= altairForkEpoch:
	case currEpoch < altairForkEpoch:
		// Phase 0 behaviour.
		if bytes.Equal(bitV, currentBitV) {
			// return early if bitfield hasn't changed
			return
		}
		s.updateSubnetRecordWithMetadata(bitV)
	default:
		// Retrieve sync subnets from application level
		// cache.
		bitS := bitfield.Bitvector4{byte(0x00)}
@@ -82,13 +93,6 @@ func (s *Service) RefreshENR() {
			return
		}
		s.updateSubnetRecordWithMetadataV2(bitV, bitS)
	default:
		// Phase 0 behaviour.
		if bytes.Equal(bitV, currentBitV) {
			// return early if bitfield hasn't changed
			return
		}
		s.updateSubnetRecordWithMetadata(bitV)
	}
	// ping all peers to inform them of new metadata
	s.pingPeers()
@@ -140,9 +144,9 @@ func (s *Service) createListener(
	// by default we will listen to all interfaces.
	var bindIP net.IP
	switch udpVersionFromIP(ipAddr) {
	case "udp4":
	case udp4:
		bindIP = net.IPv4zero
	case "udp6":
	case udp6:
		bindIP = net.IPv6zero
	default:
		return nil, errors.New("invalid ip provided")
@@ -160,6 +164,7 @@ func (s *Service) createListener(
		IP:   bindIP,
		Port: int(s.cfg.UDPPort),
	}

	// Listen to all network interfaces
	// for both ip protocols.
	networkVersion := "udp"
@@ -177,44 +182,27 @@ func (s *Service) createListener(
	if err != nil {
		return nil, errors.Wrap(err, "could not create local node")
	}
	if s.cfg.HostAddress != "" {
		hostIP := net.ParseIP(s.cfg.HostAddress)
		if hostIP.To4() == nil && hostIP.To16() == nil {
			log.Errorf("Invalid host address given: %s", hostIP.String())
		} else {
			localNode.SetFallbackIP(hostIP)
			localNode.SetStaticIP(hostIP)
		}
	}
	if s.cfg.HostDNS != "" {
		host := s.cfg.HostDNS
		ips, err := net.LookupIP(host)
		if err != nil {
			return nil, errors.Wrap(err, "could not resolve host address")
		}
		if len(ips) > 0 {
			// Use first IP returned from the
			// resolver.
			firstIP := ips[0]
			localNode.SetFallbackIP(firstIP)
		}
	}
	dv5Cfg := discover.Config{
		PrivateKey: privKey,
	}
	dv5Cfg.Bootnodes = []*enode.Node{}
	for _, addr := range s.cfg.Discv5BootStrapAddr {

	bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
	for _, addr := range s.cfg.Discv5BootStrapAddrs {
		bootNode, err := enode.Parse(enode.ValidSchemes, addr)
		if err != nil {
			return nil, errors.Wrap(err, "could not bootstrap addr")
		}
		dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)

		bootNodes = append(bootNodes, bootNode)
	}

	dv5Cfg := discover.Config{
		PrivateKey: privKey,
		Bootnodes:  bootNodes,
	}

	listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
	if err != nil {
		return nil, errors.Wrap(err, "could not listen to discV5")
	}

	return listener, nil
}
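One small change in the hunk above: the bootnode slice is now built with make([]*enode.Node, 0, len(...)) instead of growing from an empty literal. Preallocating capacity avoids repeated reallocation and copying as append grows the slice. A sketch:

package main

import "fmt"

func main() {
	addrs := []string{"a", "b", "c"}

	// Capacity is reserved up front; len stays 0 so append fills
	// from the start without ever reallocating the backing array.
	nodes := make([]string, 0, len(addrs))
	for _, a := range addrs {
		nodes = append(nodes, "node:"+a)
	}
	fmt.Println(nodes, len(nodes), cap(nodes)) // [node:a node:b node:c] 3 3
}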
@@ -242,8 +230,35 @@ func (s *Service) createLocalNode(
	if err != nil {
		return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
	}

	localNode = initializeAttSubnets(localNode)
	return initializeSyncCommSubnets(localNode), nil
	localNode = initializeSyncCommSubnets(localNode)

	if s.cfg != nil && s.cfg.HostAddress != "" {
		hostIP := net.ParseIP(s.cfg.HostAddress)
		if hostIP.To4() == nil && hostIP.To16() == nil {
			return nil, errors.Errorf("invalid host address: %s", s.cfg.HostAddress)
		} else {
			localNode.SetFallbackIP(hostIP)
			localNode.SetStaticIP(hostIP)
		}
	}

	if s.cfg != nil && s.cfg.HostDNS != "" {
		host := s.cfg.HostDNS
		ips, err := net.LookupIP(host)
		if err != nil {
			return nil, errors.Wrapf(err, "could not resolve host address: %s", host)
		}
		if len(ips) > 0 {
			// Use first IP returned from the
			// resolver.
			firstIP := ips[0]
			localNode.SetFallbackIP(firstIP)
		}
	}

	return localNode, nil
}

func (s *Service) startDiscoveryV5(
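The validation moved into createLocalNode hinges on net.ParseIP, which signals failure by returning nil rather than an error; To4/To16 then classify the address family, exactly as the diff's checks do. A sketch of those behaviors:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"192.168.0.1", "::1", "invalid"} {
		ip := net.ParseIP(s)
		// ParseIP returns nil for unparseable input; To4 is non-nil
		// only for addresses representable as IPv4 (both methods are
		// safe to call on a nil IP).
		fmt.Printf("%-12s parsed=%v v4=%v\n", s, ip != nil, ip.To4() != nil)
	}
}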
@@ -363,7 +378,7 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
	return allAddrs, nil
}

func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
func ParseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
	discv5Nodes, _ = parseGenericAddrs(addrs)
	if len(discv5Nodes) == 0 {
		log.Warn("No bootstrap addresses supplied")
@@ -483,9 +498,9 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
	return ma.NewMultiaddr(address)
}

func udpVersionFromIP(ipAddr net.IP) string {
func udpVersionFromIP(ipAddr net.IP) int {
	if ipAddr.To4() != nil {
		return "udp4"
		return udp4
	}
	return "udp6"
	return udp6
}
@@ -42,10 +42,6 @@ import (

var discoveryWaitTime = 1 * time.Second

func init() {
	rand.Seed(time.Now().Unix())
}

func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
	ip, err := prysmNetwork.ExternalIPv4()
	require.NoError(t, err, "Could not get ip")
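Dropping the init-time seeding is safe on modern Go: since Go 1.20 the global math/rand source is seeded automatically, and rand.Seed is deprecated. A sketch:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// No rand.Seed(time.Now().Unix()) needed: as of Go 1.20 the
	// global source starts with a random seed by default.
	fmt.Println(rand.Intn(100))
}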
@@ -103,8 +99,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
	for i := 1; i <= 5; i++ {
		port = 3000 + i
		cfg := &Config{
			Discv5BootStrapAddr: []string{bootNode.String()},
			UDPPort:             uint(port),
			Discv5BootStrapAddrs: []string{bootNode.String()},
			UDPPort:              uint(port),
		}
		ipAddr, pkey := createAddrAndPrivKey(t)
		s = &Service{
@@ -134,6 +130,106 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
	}
}
func TestCreateLocalNode(t *testing.T) {
	testCases := []struct {
		name          string
		cfg           *Config
		expectedError bool
	}{
		{
			name:          "valid config",
			cfg:           nil,
			expectedError: false,
		},
		{
			name:          "invalid host address",
			cfg:           &Config{HostAddress: "invalid"},
			expectedError: true,
		},
		{
			name:          "valid host address",
			cfg:           &Config{HostAddress: "192.168.0.1"},
			expectedError: false,
		},
		{
			name:          "invalid host DNS",
			cfg:           &Config{HostDNS: "invalid"},
			expectedError: true,
		},
		{
			name:          "valid host DNS",
			cfg:           &Config{HostDNS: "www.google.com"},
			expectedError: false,
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			// Define ports.
			const (
				udpPort = 2000
				tcpPort = 3000
			)

			// Create a private key.
			address, privKey := createAddrAndPrivKey(t)

			// Create a service.
			service := &Service{
				genesisTime:           time.Now(),
				genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
				cfg:                   tt.cfg,
			}

			localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
			if tt.expectedError {
				require.NotNil(t, err)
				return
			}

			require.NoError(t, err)

			expectedAddress := address
			if tt.cfg != nil && tt.cfg.HostAddress != "" {
				expectedAddress = net.ParseIP(tt.cfg.HostAddress)
			}

			// Check IP.
			// IP is not checked in the case of DNS, since it can be resolved to different IPs.
			if tt.cfg == nil || tt.cfg.HostDNS == "" {
				ip := new(net.IP)
				require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("ip", ip)))
				require.Equal(t, true, ip.Equal(expectedAddress))
				require.Equal(t, true, localNode.Node().IP().Equal(expectedAddress))
			}

			// Check UDP.
			udp := new(uint16)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("udp", udp)))
			require.Equal(t, udpPort, localNode.Node().UDP())

			// Check TCP.
			tcp := new(uint16)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("tcp", tcp)))
			require.Equal(t, tcpPort, localNode.Node().TCP())

			// Check fork is set.
			fork := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
			require.NotEmpty(t, *fork)

			// Check att subnets.
			attSubnets := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(attSubnetEnrKey, attSubnets)))
			require.DeepSSZEqual(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, *attSubnets)

			// Check sync committees subnets.
			syncSubnets := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
			require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
		})
	}
}
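TestCreateLocalNode above uses Go's table-driven test idiom: a slice of named cases driven through t.Run, so each case passes or fails independently and new cases are one struct literal away. A compact sketch of the same shape (the abs function is just a stand-in):

package main

import "testing"

func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func TestAbs(t *testing.T) {
	cases := []struct {
		name string
		in   int
		want int
	}{
		{"positive", 3, 3},
		{"negative", -3, 3},
		{"zero", 0, 0},
	}
	for _, tc := range cases {
		// t.Run reports each case under its own name.
		t.Run(tc.name, func(t *testing.T) {
			if got := abs(tc.in); got != tc.want {
				t.Fatalf("abs(%d) = %d, want %d", tc.in, got, tc.want)
			}
		})
	}
}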
func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
	addr := net.ParseIP("invalidIP")
	_, pkey := createAddrAndPrivKey(t)
@@ -310,12 +406,12 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
}

func TestCorrectUDPVersion(t *testing.T) {
	assert.Equal(t, "udp4", udpVersionFromIP(net.IPv4zero), "incorrect network version")
	assert.Equal(t, "udp6", udpVersionFromIP(net.IPv6zero), "incorrect network version")
	assert.Equal(t, "udp4", udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
	assert.Equal(t, "udp6", udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
	assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
	assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
	assert.Equal(t, udp4, udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
	assert.Equal(t, udp6, udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
	// v4 in v6
	assert.Equal(t, "udp4", udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
	assert.Equal(t, udp4, udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
}
// addPeer is a helper to add a peer with a given connection state.
@@ -46,9 +46,9 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {

	bootNode := bootListener.Self()
	cfg := &Config{
		Discv5BootStrapAddr: []string{bootNode.String()},
		UDPPort:             uint(port),
		StateNotifier:       &mock.MockStateNotifier{},
		Discv5BootStrapAddrs: []string{bootNode.String()},
		UDPPort:              uint(port),
		StateNotifier:        &mock.MockStateNotifier{},
	}

	var listeners []*discover.UDPv5
@@ -132,8 +132,8 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {

	bootNode := bootListener.Self()
	cfg := &Config{
		Discv5BootStrapAddr: []string{bootNode.String()},
		UDPPort:             uint(port),
		Discv5BootStrapAddrs: []string{bootNode.String()},
		UDPPort:              uint(port),
	}

	var listeners []*discover.UDPv5
@@ -20,7 +20,7 @@ self=%s
%d peers
%v
`,
		s.cfg.BootstrapNodeAddr,
		s.cfg.Discv5BootStrapAddrs,
		s.selfAddresses(),
		len(s.host.Network().Peers()),
		formatPeers(s.host), // Must be last. Writes one entry per row.
@@ -31,29 +31,31 @@ func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
}

// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
	cfg := s.cfg
	listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
	if err != nil {
		log.WithError(err).Fatal("Failed to p2p listen")
		return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
	}
	if cfg.LocalIP != "" {
		if net.ParseIP(cfg.LocalIP) == nil {
			log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
			return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
		}

		listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
		if err != nil {
			log.WithError(err).Fatal("Failed to p2p listen")
			return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
		}
	}
	ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
	if err != nil {
		log.WithError(err).Fatal("Failed to retrieve private key")
		return nil, errors.Wrap(err, "cannot convert private key to interface private key. (Private key not displayed in logs for security reasons)")
	}
	id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
	if err != nil {
		log.WithError(err).Fatal("Failed to retrieve peer id")
		return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
	}

	log.Infof("Running node with peer id of %s ", id.String())

	options := []libp2p.Option{
@@ -64,10 +66,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.DefaultMuxers,
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
		libp2p.Security(noise.ID, noise.New),
		libp2p.Ping(false), // Disable Ping Service.
	}

	options = append(options, libp2p.Security(noise.ID, noise.New))

	if cfg.EnableUPnP {
		options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
	}
@@ -99,12 +101,11 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
			return addrs
		}))
	}
	// Disable Ping Service.
	options = append(options, libp2p.Ping(false))

	if features.Get().DisableResourceManager {
		options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
	}
	return options
	return options, nil
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
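One caveat about the new LocalIP branch above: net.ParseIP signals failure by returning nil rather than an error, so err is nil at that call site, and github.com/pkg/errors.Wrapf returns nil when given a nil error. A sketch of the behavior worth double-checking there:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Wrapf annotates a non-nil error with context...
	base := errors.New("dial failed")
	fmt.Println(errors.Wrapf(base, "peer %s", "abc")) // peer abc: dial failed

	// ...but propagates nil: wrapping in a branch where err is nil
	// yields nil, so the caller would see no error at all.
	fmt.Println(errors.Wrapf(nil, "invalid ip") == nil) // true
}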
@@ -119,7 +119,9 @@ func TestDefaultMultiplexers(t *testing.T) {
	svc.privKey, err = privKey(svc.cfg)
	assert.NoError(t, err)
	ipAddr := network.IPAddr()
	opts := svc.buildOptions(ipAddr, svc.privKey)
	opts, err := svc.buildOptions(ipAddr, svc.privKey)
	assert.NoError(t, err)

	err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
	assert.NoError(t, err)
@@ -107,5 +107,4 @@ func TestStore_TrustedPeers(t *testing.T) {
	assert.Equal(t, false, store.IsTrustedPeer(pid1))
	assert.Equal(t, false, store.IsTrustedPeer(pid2))
	assert.Equal(t, false, store.IsTrustedPeer(pid3))

}
@@ -56,12 +56,12 @@ func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConf
func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.score(pid)
	return s.scoreNoLock(pid)
}

// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
	if s.isBadPeer(pid) {
// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
	if s.isBadPeerNoLock(pid) {
		return BadPeerScore
	}
	score := float64(0)
@@ -87,11 +87,11 @@ func (s *BadResponsesScorer) Params() *BadResponsesScorerConfig {
func (s *BadResponsesScorer) Count(pid peer.ID) (int, error) {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.count(pid)
	return s.countNoLock(pid)
}

// count is a lock-free version of Count.
func (s *BadResponsesScorer) count(pid peer.ID) (int, error) {
// countNoLock is a lock-free version of Count.
func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.BadResponses, nil
	}
@@ -119,11 +119,11 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.isBadPeer(pid)
	return s.isBadPeerNoLock(pid)
}

// isBadPeer is lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.BadResponses >= s.config.Threshold
	}
@@ -137,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
		if s.isBadPeer(pid) {
		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}
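The rename running through all the scorer files makes the locking contract explicit: exported methods take the store's read lock themselves, while *NoLock variants assume the caller already holds it, which is what lets BadPeers call isBadPeerNoLock inside a single locked iteration. A minimal sketch of the convention (a hypothetical counter, not the scorer itself):

package main

import (
	"fmt"
	"sync"
)

type Counter struct {
	mu sync.RWMutex
	n  map[string]int
}

// Count is the public entry point: it acquires the lock itself.
func (c *Counter) Count(key string) int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.countNoLock(key)
}

// countNoLock assumes c.mu is already held. Callers iterating under
// one lock use it to avoid re-acquiring the RWMutex, since a
// recursive RLock can deadlock when a writer is waiting in between.
func (c *Counter) countNoLock(key string) int {
	return c.n[key]
}

func (c *Counter) All() map[string]int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int, len(c.n))
	for k := range c.n {
		out[k] = c.countNoLock(k) // safe: lock already held
	}
	return out
}

func main() {
	c := &Counter{n: map[string]int{"a": 1}}
	fmt.Println(c.Count("a"), c.All())
}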
@@ -15,6 +15,8 @@ import (
)

func TestScorers_BadResponses_Score(t *testing.T) {
	const pid = "peer1"

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -28,15 +30,23 @@ func TestScorers_BadResponses_Score(t *testing.T) {
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()

	assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score for unregistered peer")
	scorer.Increment("peer1")
	assert.Equal(t, -2.5, scorer.Score("peer1"))
	scorer.Increment("peer1")
	assert.Equal(t, float64(-5), scorer.Score("peer1"))
	scorer.Increment("peer1")
	scorer.Increment("peer1")
	assert.Equal(t, -100.0, scorer.Score("peer1"))
	assert.Equal(t, true, scorer.IsBadPeer("peer1"))
	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, -2.5, scorer.Score(pid))

	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-5), scorer.Score(pid))

	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-7.5), scorer.Score(pid))

	scorer.Increment(pid)
	assert.Equal(t, true, scorer.IsBadPeer(pid))
	assert.Equal(t, -100.0, scorer.Score(pid))
}

func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
@@ -98,11 +98,11 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
func (s *BlockProviderScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.score(pid)
	return s.scoreNoLock(pid)
}

// score is a lock-free version of Score.
func (s *BlockProviderScorer) score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *BlockProviderScorer) scoreNoLock(pid peer.ID) float64 {
	score := float64(0)
	peerData, ok := s.store.PeerData(pid)
	// Boost score of new peers or peers that haven't been accessed for too long.
@@ -126,7 +126,7 @@ func (s *BlockProviderScorer) Params() *BlockProviderScorerConfig {
func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64) {
	s.store.Lock()
	defer s.store.Unlock()
	defer s.touch(pid)
	defer s.touchNoLock(pid)

	if cnt <= 0 {
		return
@@ -145,11 +145,11 @@ func (s *BlockProviderScorer) IncrementProcessedBlocks(pid peer.ID, cnt uint64)
func (s *BlockProviderScorer) Touch(pid peer.ID, t ...time.Time) {
	s.store.Lock()
	defer s.store.Unlock()
	s.touch(pid, t...)
	s.touchNoLock(pid, t...)
}

// touch is a lock-free version of Touch.
func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
// touchNoLock is a lock-free version of Touch.
func (s *BlockProviderScorer) touchNoLock(pid peer.ID, t ...time.Time) {
	peerData := s.store.PeerDataGetOrCreate(pid)
	if len(t) == 1 {
		peerData.BlockProviderUpdated = t[0]
@@ -162,11 +162,11 @@ func (s *BlockProviderScorer) touch(pid peer.ID, t ...time.Time) {
func (s *BlockProviderScorer) ProcessedBlocks(pid peer.ID) uint64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.processedBlocks(pid)
	return s.processedBlocksNoLock(pid)
}

// processedBlocks is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
// processedBlocksNoLock is a lock-free version of ProcessedBlocks.
func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.ProcessedBlocks
	}
@@ -177,13 +177,13 @@ func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (_ *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
	return false
}

// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
func (_ *BlockProviderScorer) BadPeers() []peer.ID {
func (*BlockProviderScorer) BadPeers() []peer.ID {
	return []peer.ID{}
}

@@ -277,9 +277,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
	peers := make([]peer.ID, len(pids))
	for i, pid := range pids {
		if scoreFn != nil {
			scores[pid] = scoreFn(pid, s.score(pid))
			scores[pid] = scoreFn(pid, s.scoreNoLock(pid))
		} else {
			scores[pid] = s.score(pid)
			scores[pid] = s.scoreNoLock(pid)
		}
		peers[i] = pid
	}
@@ -293,9 +293,9 @@ func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
	if !features.Get().EnablePeerScorer {
		return "disabled"
	}
	score := s.score(pid)
	score := s.scoreNoLock(pid)
	return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
		(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
		(score/s.MaxScore())*100, score, s.processedBlocksNoLock(pid), s.config.ProcessedBlocksCap)
}

// MaxScore exposes maximum score attainable by peers.
@@ -38,11 +38,11 @@ func newGossipScorer(store *peerdata.Store, config *GossipScorerConfig) *GossipS
func (s *GossipScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.score(pid)
	return s.scoreNoLock(pid)
}

// score is a lock-free version of Score.
func (s *GossipScorer) score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return 0
@@ -54,11 +54,11 @@ func (s *GossipScorer) score(pid peer.ID) float64 {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.isBadPeer(pid)
	return s.isBadPeerNoLock(pid)
}

// isBadPeer is lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeer(pid peer.ID) bool {
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return false
@@ -73,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
		if s.isBadPeer(pid) {
		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}
@@ -98,11 +98,11 @@ func (s *GossipScorer) SetGossipData(pid peer.ID, gScore float64,
func (s *GossipScorer) GossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.gossipData(pid)
	return s.gossipDataNoLock(pid)
}

// gossipData lock-free version of GossipData.
func (s *GossipScorer) gossipData(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
// gossipDataNoLock lock-free version of GossipData.
func (s *GossipScorer) gossipDataNoLock(pid peer.ID) (float64, float64, map[string]*pbrpc.TopicScoreSnapshot, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		return peerData.GossipScore, peerData.BehaviourPenalty, peerData.TopicScores, nil
	}
@@ -41,12 +41,12 @@ func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig)
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.score(pid)
	return s.scoreNoLock(pid)
}

// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
	if s.isBadPeer(pid) {
// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
	if s.isBadPeerNoLock(pid) {
		return BadPeerScore
	}
	score := float64(0)
@@ -70,11 +70,11 @@ func (s *PeerStatusScorer) score(pid peer.ID) float64 {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.isBadPeer(pid)
	return s.isBadPeerNoLock(pid)
}

// isBadPeer is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return false
@@ -100,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
		if s.isBadPeer(pid) {
		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}
@@ -129,11 +129,11 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.peerStatus(pid)
	return s.peerStatusNoLock(pid)
}

// peerStatus lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
// peerStatusNoLock lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
	if peerData, ok := s.store.PeerData(pid); ok {
		if peerData.ChainState == nil {
			return nil, peerdata.ErrNoPeerStatus