Compare commits


1 Commit

Author: Preston Van Loon
SHA1: c0993f69bf
Message: Testing
Date: 2024-02-21 08:49:59 -06:00
545 changed files with 20223 additions and 23375 deletions

View File

@@ -6,12 +6,6 @@ import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# hermetic_cc_toolchain v3.0.1 required changes.
common --enable_platform_specific_config
build:linux --sandbox_add_mount_pair=/tmp
build:macos --sandbox_add_mount_pair=/var/tmp
build:windows --sandbox_add_mount_pair=C:\Temp
# E2E run with debug gotag
test:e2e --define gotags=debug
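
For context, the gotags=debug define used by test:e2e is commonly wired to a Go build tag, so debug-only files are compiled into the E2E binaries. A minimal sketch of such a tag-gated file, with hypothetical package and constant names:

//go:build debug

package debugbuild

// DebugGoTagEnabled reports whether the binary was built with the debug
// gotag; without the tag this file is excluded, and a companion file
// guarded by !debug would typically define the constant as false.
const DebugGoTagEnabled = true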

View File

@@ -1 +1 @@
7.1.0
7.0.0

MODULE.bazel.lock (generated, 821 changed lines)

File diff suppressed because it is too large.

View File

@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

View File

@@ -16,14 +16,12 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
)
@@ -83,10 +81,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
],
)
@@ -115,13 +113,6 @@ http_archive(
url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz", # 2024-01-24
)
http_archive(
name = "aspect_bazel_lib",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
)
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
aspect_bazel_lib_dependencies()
@@ -130,9 +121,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -153,13 +144,17 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
],
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
@@ -198,14 +193,10 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.8",
go_version = "1.21.6",
nogo = "@//:nogo",
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -243,7 +234,9 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0"
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
bls_test_version = "v0.1.1"
@@ -260,7 +253,7 @@ filegroup(
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -276,7 +269,7 @@ filegroup(
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -292,7 +285,7 @@ filegroup(
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -306,7 +299,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -6,14 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"health.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/iface:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -39,12 +36,10 @@ go_test(
srcs = [
"checkpoint_test.go",
"client_test.go",
"health_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -58,6 +53,5 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
@@ -74,12 +74,7 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.WithFields(logrus.Fields{
"name": vu.Config.ConfigName,
"fork": version.String(vu.Fork),
}).Info("Detected supported config in remote finalized state")
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
@@ -113,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}
log.
WithField("blockSlot", b.Block().Slot()).
WithField("stateSlot", s.Slot()).
WithField("stateRoot", hexutil.Encode(sr[:])).
WithField("blockRoot", hexutil.Encode(br[:])).
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,

View File

@@ -22,7 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
const (
@@ -309,9 +309,9 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(logrus.Fields{
"validatorIndex": w.ValidatorIndex,
"withdrawalAddress": w.ToExecutionAddress,
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
epoch, err := strconv.Atoi(d.Epoch)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
return nil, err
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
Epoch: primitives.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)
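
The epoch parsing change above trades strconv.ParseUint for strconv.Atoi; a standalone sketch (not from the repository) showing the practical difference for an unsigned Epoch value:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseUint targets uint64 directly and rejects negative input, which
	// matches the unsigned Epoch type.
	u, err := strconv.ParseUint("194048", 10, 64)
	fmt.Println(u, err) // 194048 <nil>

	// Atoi parses into a signed int, so "-1" is accepted here and only
	// becomes a huge value after the later uint64 conversion.
	i, err := strconv.Atoi("-1")
	fmt.Println(uint64(i), err) // 18446744073709551615 <nil>
}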

View File

@@ -1,55 +0,0 @@
package beacon
import (
"context"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node iface.HealthNode
sync.RWMutex
}
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy() bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.RLock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
if isStatusChanged {
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
n.isHealthy = &newStatus
n.Unlock() // It's better to unlock as soon as the protected section is over.
n.healthChan <- newStatus
}
return newStatus
}
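
A minimal sketch of how this tracker could be driven by a caller; pingNode, watchHealth, and the 30-second poll interval are illustrative names rather than code from the repository:

package beacon

import (
	"context"
	"time"
)

// pingNode is a stand-in HealthNode; a real caller would pass the beacon
// API client here.
type pingNode struct{}

func (pingNode) IsHealthy(_ context.Context) bool { return true }

// watchHealth polls the node on an interval and logs every status
// transition pushed on HealthUpdates.
func watchHealth(ctx context.Context) {
	tracker := NewNodeHealthTracker(pingNode{})
	go func() {
		for healthy := range tracker.HealthUpdates() {
			log.WithField("healthy", healthy).Info("Beacon node health changed")
		}
	}()
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			tracker.CheckHealth(ctx)
		}
	}
}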

View File

@@ -1,118 +0,0 @@
package beacon
import (
"context"
"sync"
"testing"
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(context.Background())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
n := NewNodeHealthTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
n.CheckHealth(context.Background())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy() // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}

View File

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["health.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
visibility = ["//visibility:public"],
)

View File

@@ -1,13 +0,0 @@
package iface
import "context"
type HealthTracker interface {
HealthUpdates() <-chan bool
IsHealthy() bool
CheckHealth(ctx context.Context) bool
}
type HealthNode interface {
IsHealthy(ctx context.Context) bool
}

View File

@@ -1,5 +0,0 @@
package beacon
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "beacon")

View File

@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon/iface:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -1,53 +0,0 @@
package testing
import (
"context"
"reflect"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
)
var (
_ = iface.HealthNode(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}

View File

@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
"bodyBase64": "(nil value)",
"url": r.URL.String(),
"body-base64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
"bodyBase64": string(body),
"url": r.URL.String(),
"body-base64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
return nil
@@ -304,8 +304,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -343,8 +341,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -383,8 +379,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
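
The headers removed above were set through per-request option closures; a self-contained sketch of that pattern, where reqOption and withJSONHeaders are hypothetical names and the URL is illustrative:

package main

import (
	"fmt"
	"net/http"
)

type reqOption func(*http.Request)

func withJSONHeaders(consensusVersion string) reqOption {
	return func(r *http.Request) {
		r.Header.Add("Eth-Consensus-Version", consensusVersion)
		r.Header.Set("Content-Type", "application/json")
		r.Header.Set("Accept", "application/json")
	}
}

func main() {
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:18550/eth/v1/builder/blinded_blocks", nil)
	if err != nil {
		panic(err)
	}
	// Apply each option before sending, the same way a do() helper would.
	withJSONHeaders("deneb")(req)
	fmt.Println(req.Header)
}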

View File

@@ -321,8 +321,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -349,8 +347,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -380,8 +376,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)

View File

@@ -21,9 +21,6 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// ErrConnectionIssue represents a connection problem.
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)

View File

@@ -1,24 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["event_stream.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["event_stream_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,148 +0,0 @@
package event
import (
"bufio"
"context"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
log "github.com/sirupsen/logrus"
)
const (
EventHead = "head"
EventBlock = "block"
EventAttestation = "attestation"
EventVoluntaryExit = "voluntary_exit"
EventBlsToExecutionChange = "bls_to_execution_change"
EventProposerSlashing = "proposer_slashing"
EventAttesterSlashing = "attester_slashing"
EventFinalizedCheckpoint = "finalized_checkpoint"
EventChainReorg = "chain_reorg"
EventContributionAndProof = "contribution_and_proof"
EventLightClientFinalityUpdate = "light_client_finality_update"
EventLightClientOptimisticUpdate = "light_client_optimistic_update"
EventPayloadAttributes = "payload_attributes"
EventBlobSidecar = "blob_sidecar"
EventError = "error"
EventConnectionError = "connection_error"
)
var (
_ = EventStreamClient(&EventStream{})
)
var DefaultEventTopics = []string{EventHead}
type EventStreamClient interface {
Subscribe(eventsChannel chan<- *Event)
}
type Event struct {
EventType string
Data []byte
}
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
ctx context.Context
httpClient *http.Client
host string
topics []string
}
func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
// Check if the host is a valid URL
_, err := url.ParseRequestURI(host)
if err != nil {
return nil, err
}
if len(topics) == 0 {
return nil, errors.New("no topics provided")
}
return &EventStream{
ctx: ctx,
httpClient: httpClient,
host: host,
topics: topics,
}, nil
}
func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
allTopics := strings.Join(h.topics, ",")
log.WithField("topics", allTopics).Info("Listening to Beacon API events")
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
}
defer func() {
if closeErr := resp.Body.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close events response body")
}
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
var eventType, data string // Variables to store event type and data
// Iterate over lines of the event stream
for scanner.Scan() {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
close(eventsChannel)
return
default:
line := scanner.Text() // TODO(13730): scanner does not handle \r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface

// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
}
// Reset eventType and data for the next event
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
// Extract event type from the "event" field
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
// Extract data from the "data" field
data = d
}
}
}
if err := scanner.Err(); err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
}
}
}

View File

@@ -1,80 +0,0 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNewEventStream(t *testing.T) {
validURL := "http://localhost:8080"
invalidURL := "://invalid"
topics := []string{"topic1", "topic2"}
tests := []struct {
name string
host string
topics []string
wantErr bool
}{
{"Valid input", validURL, topics, false},
{"Invalid URL", invalidURL, topics, true},
{"No topics", validURL, []string{}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
}
})
server := httptest.NewServer(mux)
defer server.Close()
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)
// Collect events
var events []*Event
for len(events) != 2 {
select {
case event := <-eventsChannel:
log.Info(event)
events = append(events, event)
}
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
}
}
}

View File

@@ -563,9 +563,3 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
func (s *Service) inRegularSync() bool {
return s.cfg.SyncChecker.Synced()
}
// validating returns true if the beacon is tracking some validators that have
// registered for proposing.
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}

View File

@@ -63,7 +63,7 @@ func TestSaveHead_Different(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -238,7 +238,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

View File

@@ -82,20 +82,19 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {

View File

@@ -163,7 +163,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
require.NoError(t, err)
require.Equal(t, p, bytesutil.ToBytes32(parentHash))
require.Equal(t, td, totalDifficulty.Hex())
require.Equal(t, td, totalDifficulty.String())
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
require.ErrorContains(t, "could not get pow block: block not found", err)

View File

@@ -18,63 +18,17 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Try if we have already set the checkpoint cache
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil
}
if cachedState != nil && !cachedState.IsNil() {
return cachedState
}
st, err := s.HeadState(ctx)
if err != nil {
return nil
}
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
if err != nil {
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
}
return st
}
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
if st := s.getRecentPreState(ctx, c); st != nil {
return st, nil
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.

View File

@@ -146,28 +146,6 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -308,16 +307,16 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update proposer index cache")
}
go func(ep primitives.Epoch) {
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateCommitteeCache(slotCtx, st, ep+1); err != nil {
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
}
}(e)
}()
// The latest block header is from the previous epoch
r, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
@@ -559,20 +558,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -586,20 +571,11 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
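
The removed DA-deadline logging relied on time.AfterFunc with a deferred Stop; a standalone sketch of that pattern (durations are arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Schedule a warning for a future instant and cancel it if the work
	// finishes first, mirroring the slot-end DA log above.
	deadline := time.Now().Add(200 * time.Millisecond)
	warn := time.AfterFunc(time.Until(deadline), func() {
		fmt.Println("still waiting at deadline")
	})
	defer warn.Stop()

	// Simulated work that completes before the deadline, so the warning
	// never fires.
	time.Sleep(50 * time.Millisecond)
	fmt.Println("done")
}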

View File

@@ -1531,7 +1531,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
// 12 and recover. Notice that it takes two epochs to fully recover, and we stay
// optimistic for the whole time.
func TestStore_NoViableHead_Liveness(t *testing.T) {
t.Skip("Requires #13664 to be fixed")
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
@@ -2115,7 +2114,7 @@ func TestMissingIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
missing, err := missingIndices(bs, c.root, c.expected)
if c.err != nil {
require.ErrorIs(t, err, c.err)

View File

@@ -95,9 +95,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {

View File

@@ -32,9 +32,6 @@ import (
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// This defines how many epochs since finality the run time will begin to expand our respective cache sizes.
var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
@@ -97,7 +94,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
@@ -106,7 +102,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
@@ -193,11 +188,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
// We apply the same heuristic to some of our more important caches.
if err := s.handleCaches(); err != nil {
return err
}
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
@@ -371,27 +361,6 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
func (s *Service) handleCaches() error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality primitives.Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
if currentEpoch > finalized.Epoch {
sinceFinality = currentEpoch - finalized.Epoch
}
if sinceFinality >= epochsSinceFinalityExpandCache {
helpers.ExpandCommitteeCache()
return nil
}
helpers.CompressCommitteeCache()
return nil
}
// This performs the state transition function and returns the poststate or an
// error if the block fails to verify the consensus rules
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
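
ReceiveBlock runs the state transition and the engine notification concurrently through errgroup; a self-contained sketch of that pattern with trivial stand-in work (compute is hypothetical), showing why each closure declares its own err:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func compute(n int) (int, error) { return n * 2, nil }

func main() {
	eg, _ := errgroup.WithContext(context.Background())

	var a, b int
	eg.Go(func() error {
		// A per-closure err keeps the two goroutines from racing on a
		// shared variable; only the named results a and b escape.
		var err error
		a, err = compute(1)
		return err
	})
	eg.Go(func() error {
		var err error
		b, err = compute(2)
		return err
	})
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(a, b) // 2 4
}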

View File

@@ -308,29 +308,6 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
func TestHandleCaches_EnablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Expanding committee cache size")
}
func TestHandleCaches_DisablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.handleCaches())
s.genesisTime = time.Now()
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Reducing committee cache size")
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
service, tr := minimalTestService(t)
pool := tr.blsPool

View File

@@ -199,7 +199,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
@@ -419,7 +418,7 @@ func (s *Service) startFromExecutionChain() error {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
@@ -551,10 +550,6 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.HasBlock(ctx, root)
}
func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {

View File

@@ -17,16 +17,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
log "github.com/sirupsen/logrus"
)
const (
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
maxCommitteesCacheSize = int(4)
// expandedCommitteeCacheSize defines the expanded size of the committee cache in the event we
// do not have finality to deal with long forks better.
expandedCommitteeCacheSize = int(32)
maxCommitteesCacheSize = int(32)
)
var (
@@ -47,7 +43,6 @@ type CommitteeCache struct {
CommitteeCache *lru.Cache
lock sync.RWMutex
inProgress map[string]bool
size int
}
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
@@ -72,33 +67,6 @@ func (c *CommitteeCache) Clear() {
defer c.lock.Unlock()
c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
c.inProgress = make(map[string]bool)
c.size = maxCommitteesCacheSize
}
// ExpandCommitteeCache expands the size of the committee cache.
func (c *CommitteeCache) ExpandCommitteeCache() {
c.lock.Lock()
defer c.lock.Unlock()
if c.size == expandedCommitteeCacheSize {
return
}
c.CommitteeCache.Resize(expandedCommitteeCacheSize)
c.size = expandedCommitteeCacheSize
log.Warnf("Expanding committee cache size from %d to %d", maxCommitteesCacheSize, expandedCommitteeCacheSize)
}
// CompressCommitteeCache compresses the size of the committee cache.
func (c *CommitteeCache) CompressCommitteeCache() {
c.lock.Lock()
defer c.lock.Unlock()
if c.size == maxCommitteesCacheSize {
return
}
c.CommitteeCache.Resize(maxCommitteesCacheSize)
c.size = maxCommitteesCacheSize
log.Warnf("Reducing committee cache size from %d to %d", expandedCommitteeCacheSize, maxCommitteesCacheSize)
}
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
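
The removed Expand/Compress helpers resized the underlying LRU; a small sketch of that resize behavior, assuming the hashicorp/golang-lru package that Prysm's lru wrapper builds on:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	c, err := lru.New(4)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 8; i++ {
		c.Add(i, i)
	}
	fmt.Println("len at size 4:", c.Len()) // 4: oldest entries evicted on Add

	c.Resize(32) // grow: nothing is evicted, there is simply more room
	for i := 8; i < 20; i++ {
		c.Add(i, i)
	}
	fmt.Println("len at size 32:", c.Len()) // 16

	evicted := c.Resize(4) // shrink: oldest entries are dropped to fit
	fmt.Println("evicted on shrink:", evicted, "len:", c.Len()) // 12, 4
}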

View File

@@ -74,11 +74,3 @@ func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) Clear() {
return
}
func (c *FakeCommitteeCache) ExpandCommitteeCache() {
return
}
func (c *FakeCommitteeCache) CompressCommitteeCache() {
return
}

View File

@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -1189,3 +1189,11 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
require.NoError(b, err)
}
}
func emptyEth1data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
}
}

View File

@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
}
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -15,6 +15,8 @@ import (
)
var (
// ErrEmptyExecutionBlock occurs when the execution block is nil.
ErrEmptyExecutionBlock = errors.New("empty execution block")
// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
// ErrInvalidDepositCount occurs when the value for mix in length is 0.

View File

@@ -1,9 +1,15 @@
package cache
import (
"errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
BlockRoot [32]byte

View File

@@ -109,6 +109,10 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
@@ -122,6 +126,10 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return
}
c.lock.Lock()
defer c.lock.Unlock()

View File

@@ -2,7 +2,6 @@ package cache_test
import (
"context"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -36,28 +35,3 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
c.Disable()
require.NoError(t, c.MarkInProgress(r))
c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Get call will only terminate when
// it is not longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()
}

View File

@@ -41,9 +41,3 @@ func (t *TrackedValidatorsCache) Prune() {
defer t.Unlock()
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
}
func (t *TrackedValidatorsCache) Validating() bool {
t.Lock()
defer t.Unlock()
return len(t.trackedValidators) > 0
}

View File

@@ -99,7 +99,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
if err != nil {
@@ -115,9 +115,7 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
}
proposerPubKey := proposer.PublicKey
sig := blk.Signature()
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
return blkRoot, nil
})
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, blk.Block().HashTreeRoot)
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.

View File

@@ -79,13 +79,11 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
}
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
assert.NoError(t, err)
blkRoot, err := altairBlk.Block.HashTreeRoot()
assert.NoError(t, err)
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)
sig := keys[0].Sign(rt[:]).Marshal()
altairBlk.Signature = sig
wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
}

View File

@@ -14,7 +14,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -391,16 +391,6 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
return nil
}
// ExpandCommitteeCache resizes the cache to a higher limit.
func ExpandCommitteeCache() {
committeeCache.ExpandCommitteeCache()
}
// CompressCommitteeCache resizes the cache to a lower limit.
func CompressCommitteeCache() {
committeeCache.CompressCommitteeCache()
}
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache.Clear()

View File

@@ -96,7 +96,6 @@ go_test(
"//testing/benchmark:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -21,7 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -49,7 +48,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
@@ -136,7 +135,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)

View File

@@ -23,7 +23,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -51,7 +50,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
@@ -125,7 +124,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
BlockHash: make([]byte, 32),
}
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
e := beaconState.Eth1Data()
e.DepositCount = 100
require.NoError(t, beaconState.SetEth1Data(e))
@@ -138,7 +137,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)

View File

@@ -40,6 +40,7 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -113,6 +114,16 @@ func Test_commitmentsToCheck(t *testing.T) {
}
}
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
return nil
}
type mockDA struct {
t *testing.T
scs []blocks.ROBlob
err error
}
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)

View File

@@ -19,6 +19,9 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {

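The kv-backed and flat-file backends report missing data with different errors, and IsNotFound lets callers treat both the same way. A self-contained usage sketch with stand-in sentinels (hypothetical names; only the errors.Is pattern is the point):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// errNotFound stands in for db.ErrNotFound in this sketch.
var errNotFound = errors.New("not found")

// isNotFound mirrors the idea behind db.IsNotFound: a missing flat file is treated
// the same way as a missing record in the key-value database.
func isNotFound(err error) bool {
	return errors.Is(err, errNotFound) || errors.Is(err, fs.ErrNotExist)
}

func main() {
	_, err := os.Open("/path/that/does/not/exist")
	fmt.Println(isNotFound(err)) // true: the missing file reads like a missing db record
}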
View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"blob.go",
"ephemeral.go",
"log.go",
"metrics.go",
"pruner.go",
],
@@ -44,6 +43,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
],

View File

@@ -16,15 +16,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/file"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
var (
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
errNoBasePath = errors.New("BlobStorage base path not specified in init")
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
)
const (
@@ -37,26 +34,14 @@ const (
// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error
// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
return func(b *BlobStorage) error {
b.base = base
return nil
}
}
// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
return func(b *BlobStorage) error {
b.retentionEpochs = e
return nil
}
}
// WithSaveFsync is an option that causes Save to call fsync before renaming part files for improved durability.
func WithSaveFsync(fsync bool) BlobStorageOption {
return func(b *BlobStorage) error {
b.fsync = fsync
pruner, err := newBlobPruner(b.fs, e)
if err != nil {
return err
}
b.pruner = pruner
return nil
}
}
@@ -64,36 +49,30 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
b := &BlobStorage{}
func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error) {
base = path.Clean(base)
if err := file.MkdirAll(base); err != nil {
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
}
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
b := &BlobStorage{
fs: fs,
}
for _, o := range opts {
if err := o(b); err != nil {
return nil, errors.Wrap(err, "failed to create blob storage")
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
}
}
if b.base == "" {
return nil, errNoBasePath
if b.pruner == nil {
log.Warn("Initializing blob filesystem storage with pruning disabled")
}
b.base = path.Clean(b.base)
if err := file.MkdirAll(b.base); err != nil {
return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
}
b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
if err != nil {
return nil, err
}
b.pruner = pruner
return b, nil
}
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
base string
retentionEpochs primitives.Epoch
fsync bool
fs afero.Fs
pruner *blobPruner
fs afero.Fs
pruner *blobPruner
}
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -104,7 +83,7 @@ func (bs *BlobStorage) WarmCache() {
}
go func() {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
log.WithError(err).Error("Error encountered while warming up blob pruner cache.")
}
}()
}
@@ -119,7 +98,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return err
}
if exists {
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
return nil
}
if bs.pruner != nil {
@@ -132,14 +111,11 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
sidecarData, err := sidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
} else if len(sidecarData) == 0 {
return errSidecarEmptySSZData
}
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
partPath := fname.partPath()
partialMoved := false
// Ensure the partial file is deleted.
@@ -150,9 +126,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
// It's expected to error if the save is successful.
err = bs.fs.Remove(partPath)
if err == nil {
log.WithFields(logrus.Fields{
log.WithFields(log.Fields{
"partPath": partPath,
}).Debugf("Removed partial file")
}).Debugf("removed partial file")
}
}()
@@ -162,7 +138,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return errors.Wrap(err, "failed to create partial file")
}
n, err := partialFile.Write(sidecarData)
_, err = partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
@@ -170,24 +146,11 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
}
return errors.Wrap(err, "failed to write to partial file")
}
if bs.fsync {
if err := partialFile.Sync(); err != nil {
return err
}
}
if err := partialFile.Close(); err != nil {
err = partialFile.Close()
if err != nil {
return err
}
if n != len(sidecarData) {
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
}
if n == 0 {
return errEmptyBlobWritten
}
// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
@@ -294,12 +257,16 @@ func (p blobNamer) dir() string {
return rootString(p.root)
}
func (p blobNamer) partPath(entropy string) string {
return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
func (p blobNamer) fname(ext string) string {
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}
func (p blobNamer) partPath() string {
return p.fname(partExt)
}
func (p blobNamer) path() string {
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
return p.fname(sszExt)
}
func rootString(root [32]byte) string {

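The NewBlobStorage rewrite above moves the base path into a required functional option and validates it before building the filesystem and pruner. A self-contained sketch of that functional-options shape (hypothetical storage/option names, not the real package):

package main

import (
	"errors"
	"fmt"
)

type storage struct {
	base            string
	retentionEpochs uint64
	fsync           bool
}

type option func(*storage) error

// withBasePath is the required option: the constructor rejects an empty base path.
func withBasePath(base string) option {
	return func(s *storage) error {
		s.base = base
		return nil
	}
}

// withRetentionEpochs and withFsync are optional tuning knobs.
func withRetentionEpochs(e uint64) option {
	return func(s *storage) error {
		s.retentionEpochs = e
		return nil
	}
}

func withFsync(on bool) option {
	return func(s *storage) error {
		s.fsync = on
		return nil
	}
}

func newStorage(opts ...option) (*storage, error) {
	s := &storage{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	if s.base == "" {
		return nil, errors.New("base path not specified")
	}
	return s, nil
}

func main() {
	s, err := newStorage(withBasePath("/tmp/blobs"), withRetentionEpochs(4096), withFsync(true))
	fmt.Println(s, err)
	_, err = newStorage() // fails: the base path is required
	fmt.Println(err)
}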
View File
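Still on blob.go above: Save streams the sidecar into a .part file, optionally fsyncs, checks the byte count, and only then renames it into place, so readers never observe a half-written .ssz file. A generic write-then-rename sketch on the OS filesystem (hypothetical helper, not the BlobStorage method):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeAtomic writes data to a temporary ".part" file and renames it into place,
// so the final path either holds the complete payload or does not exist at all.
func writeAtomic(finalPath string, data []byte, fsync bool) error {
	partPath := finalPath + ".part"
	f, err := os.Create(partPath)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err != nil {
		f.Close()
		return err
	}
	if n != len(data) {
		f.Close()
		return fmt.Errorf("wrote only %d of %d bytes", n, len(data))
	}
	if fsync {
		// Flush to disk before the rename makes the file visible under its final name.
		if err := f.Sync(); err != nil {
			f.Close()
			return err
		}
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(partPath, finalPath)
}

func main() {
	dir, err := os.MkdirTemp("", "atomic-write")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(writeAtomic(filepath.Join(dir, "0.ssz"), []byte("sidecar bytes"), true)) // <nil>
}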

@@ -4,9 +4,10 @@ import (
"bytes"
"os"
"path"
"sync"
"testing"
"time"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -100,33 +101,32 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
_, err = b.Get(blob.BlockRoot(), blob.Index)
require.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("race conditions", func(t *testing.T) {
// There was a bug where saving the same blob in multiple goroutines would cause a partial blob
// to be empty. This test ensures that several goroutines can safely save the same blob at the

// same time. This isn't ideal behavior from the caller, but should be handled safely anyway.
// See https://github.com/prysmaticlabs/prysm/pull/13648
b, err := NewBlobStorage(WithBasePath(t.TempDir()))
require.NoError(t, err)
blob := testSidecars[0]
var wg sync.WaitGroup
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
require.NoError(t, b.Save(blob))
}()
}
wg.Wait()
res, err := b.Get(blob.BlockRoot(), blob.Index)
require.NoError(t, err)
require.DeepSSZEqual(t, blob, res)
})
}
// pollUntil polls a condition function until it returns true or a timeout is reached.
func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
var remainingFolders []os.FileInfo
var err error
// Define the condition function for polling
conditionFunc := func() bool {
remainingFolders, err = afero.ReadDir(fs, ".")
require.NoError(t, err)
return len(remainingFolders) == expected
}
startTime := time.Now()
for {
if conditionFunc() {
break // Condition met, exit the loop
}
if time.Since(startTime) > 30*time.Second {
return errors.New("timeout")
}
time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
}
require.Equal(t, expected, len(remainingFolders))
return nil
}
func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
@@ -243,8 +243,6 @@ func BenchmarkPruning(b *testing.B) {
}
func TestNewBlobStorage(t *testing.T) {
_, err := NewBlobStorage()
require.ErrorIs(t, err, errNoBasePath)
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
require.NoError(t, err)
}

View File

@@ -37,7 +37,7 @@ type BlobMocker struct {
// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
// root and indices to influence the result of Indices().
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error {
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
for i := range indices {
n := blobNamer{root: root, index: indices[i]}
if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {

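The CreateFakeIndices signature change from indices []uint64 to indices ...uint64 only affects call sites: indices can now be passed inline, and an existing slice still works via the spread syntax. A minimal sketch with a hypothetical stand-in:

package main

import "fmt"

// createFakeIndices mimics the variadic signature above; it just records which
// indices a caller asked for.
func createFakeIndices(root [32]byte, indices ...uint64) []uint64 {
	out := make([]uint64, len(indices))
	copy(out, indices)
	return out
}

func main() {
	var root [32]byte
	fmt.Println(createFakeIndices(root, 0, 2, 5)) // inline arguments
	existing := []uint64{1, 3}
	fmt.Println(createFakeIndices(root, existing...)) // spreading an existing slice
}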
View File

@@ -1,5 +0,0 @@
package filesystem
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "filesystem")

View File

@@ -16,7 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
@@ -87,7 +87,7 @@ func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
}()
} else {
defer func() {
log.WithFields(logrus.Fields{
log.WithFields(log.Fields{
"upToEpoch": slots.ToEpoch(pruneBefore),
"duration": time.Since(start).String(),
"filesRemoved": totalPruned,

View File

@@ -224,7 +224,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return ErrDeleteFinalized
return ErrDeleteJustifiedAndFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {

View File

@@ -289,7 +289,7 @@ func TestStore_DeleteBlock(t *testing.T) {
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
@@ -309,7 +309,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
@@ -329,7 +329,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)

View File

@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// ErrDeleteFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.

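Because ErrDeleteFinalized and ErrNotFound are exported sentinels, callers match them with errors.Is, so wrapping them with extra context keeps those checks working. A small sketch with stand-in sentinels (not the kv package itself):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the kv package sentinels referenced above.
var (
	errDeleteFinalized = errors.New("cannot delete finalized block or state")
	errNotFound        = errors.New("not found")
)

func deleteBlock(finalized bool) error {
	if finalized {
		// Wrapping with %w preserves the sentinel for errors.Is checks.
		return fmt.Errorf("delete rejected: %w", errDeleteFinalized)
	}
	return nil
}

func main() {
	err := deleteBlock(true)
	fmt.Println(errors.Is(err, errDeleteFinalized)) // true
	fmt.Println(errors.Is(err, errNotFound))        // false
}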
View File

@@ -5,6 +5,7 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -28,76 +29,72 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
//
// The main part of the algorithm traverses parent->child block relationships in the
// `blockParentRootIndicesBucket` bucket to find the path between the last finalized checkpoint
// and the current finalized checkpoint. It relies on the invariant that there is a unique path
// between two finalized checkpoints.
// The algorithm for building the index works as follows:
// - De-index all finalized beacon block roots from previous_finalized_epoch to
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
//
// This method ensures that all blocks from the current finalized epoch are considered "final" while
// maintaining only canonical and finalized blocks older than the current finalized epoch.
func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
defer span.End()
finalizedBkt := tx.Bucket(finalizedBlockRootsIndexBucket)
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
// De-index recent finalized block roots, to be re-indexed.
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
if b := finalizedBkt.Get(previousFinalizedCheckpointKey); b != nil {
if b := bkt.Get(previousFinalizedCheckpointKey); b != nil {
if err := decode(ctx, b, previousFinalizedCheckpoint); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Handle the case of checkpoint sync.
if previousFinalizedCheckpoint.Root == nil && bytes.Equal(checkpoint.Root, tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)) {
container := &ethpb.FinalizedBlockRootContainer{}
enc, err := encode(ctx, container)
blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
SetEndEpoch(checkpoint.Epoch+1),
)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range blockRoots {
if err := bkt.Delete(root[:]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
break
}
signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(checkpoint.Root, enc); err != nil {
if err := blocks.BeaconBlockIsNil(signedBlock); err != nil {
tracing.AnnotateError(span, err)
return err
}
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
}
block := signedBlock.Block()
var finalized [][]byte
if previousFinalizedCheckpoint.Root == nil {
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{genesisRoot}, checkpoint.Root, tx)
} else {
if err := updateChildOfPrevFinalizedCheckpoint(
ctx,
span,
finalizedBkt,
tx.Bucket(blockParentRootIndicesBucket), previousFinalizedCheckpoint.Root,
); err != nil {
return err
}
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{previousFinalizedCheckpoint.Root}, checkpoint.Root, tx)
}
for i, r := range finalized {
var container *ethpb.FinalizedBlockRootContainer
switch i {
case 0:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: previousFinalizedCheckpoint.Root,
}
if len(finalized) > 1 {
container.ChildRoot = finalized[i+1]
}
case len(finalized) - 1:
// We don't know the finalized child of the new finalized checkpoint.
// It will be filled out in the next function call.
container = &ethpb.FinalizedBlockRootContainer{}
if len(finalized) > 1 {
container.ParentRoot = finalized[i-1]
}
default:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: finalized[i-1],
ChildRoot: finalized[i+1],
}
parentRoot := block.ParentRoot()
container := &ethpb.FinalizedBlockRootContainer{
ParentRoot: parentRoot[:],
ChildRoot: previousRoot,
}
enc, err := encode(ctx, container)
@@ -105,13 +102,66 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(r, enc); err != nil {
if err := bkt.Put(root, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
pr := block.ParentRoot()
if parentBytes := bkt.Get(pr[:]); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, parentBytes, parent); err != nil {
tracing.AnnotateError(span, err)
return err
}
parent.ChildRoot = root
enc, err := encode(ctx, parent)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := bkt.Put(pr[:], enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
break
}
previousRoot = root
root = pr[:]
}
// Upsert blocks from the current finalized epoch.
roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range roots {
root := root[:]
if bytes.Equal(root, checkpoint.Root) || bkt.Get(root) != nil {
continue
}
if err := bkt.Put(root, containerFinalizedButNotCanonical); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
// Update previous checkpoint
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return bkt.Put(previousFinalizedCheckpointKey, enc)
}
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
@@ -151,19 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc
}
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
encs[lastIdx] = enc
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
@@ -192,6 +243,8 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root exists in this index only if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()
@@ -244,53 +297,3 @@ func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (in
tracing.AnnotateError(span, err)
return blk, err
}
func pathToFinalizedCheckpoint(ctx context.Context, roots [][]byte, checkpointRoot []byte, tx *bolt.Tx) (bool, [][]byte) {
if len(roots) == 0 || (len(roots) == 1 && roots[0] == nil) {
return false, nil
}
for _, r := range roots {
if bytes.Equal(r, checkpointRoot) {
return true, [][]byte{r}
}
children := lookupValuesForIndices(ctx, map[string][]byte{string(blockParentRootIndicesBucket): r}, tx)
if len(children) == 0 {
children = [][][]byte{nil}
}
isPath, path := pathToFinalizedCheckpoint(ctx, children[0], checkpointRoot, tx)
if isPath {
return true, append([][]byte{r}, path...)
}
}
return false, nil
}
func updatePrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt *bolt.Bucket, checkpoint *ethpb.Checkpoint) error {
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return finalizedBkt.Put(previousFinalizedCheckpointKey, enc)
}
func updateChildOfPrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt, parentBkt *bolt.Bucket, checkpointRoot []byte) error {
container := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, finalizedBkt.Get(checkpointRoot), container); err != nil {
tracing.AnnotateError(span, err)
return err
}
container.ChildRoot = parentBkt.Get(checkpointRoot)
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(checkpointRoot, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
return nil
}

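The rewritten updateFinalizedBlockRoots walks from the new finalized checkpoint up the parent links, indexing each root until it reaches a root already in the index (or genesis / the origin checkpoint), then fills in that ancestor's child pointer. A simplified in-memory sketch of that walk, with maps standing in for the bolt buckets (hypothetical, for illustration only):

package main

import "fmt"

type entry struct{ parent, child string }

// indexFinalized walks root -> parent until it hits a root already present in
// index (or the genesis root), adding each visited root on the way and filling
// in the child pointer of the ancestor it stops at.
func indexFinalized(index map[string]*entry, parents map[string]string, root, genesis string) {
	child := ""
	for {
		parent := parents[root]
		index[root] = &entry{parent: parent, child: child}
		if root == genesis {
			return
		}
		if existing, ok := index[parent]; ok {
			existing.child = root // connect the previously finalized chain to the new one
			return
		}
		child, root = root, parent
	}
}

func main() {
	parents := map[string]string{"b": "a", "c": "b", "d": "c"}
	index := map[string]*entry{"a": {}} // "a" was finalized in an earlier run
	indexFinalized(index, parents, "d", "genesis")
	for _, r := range []string{"a", "b", "c", "d"} {
		fmt.Printf("%s -> %+v\n", r, *index[r])
	}
}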
View File

@@ -26,30 +26,38 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
// All blocks up to slotsPerEpoch*2 should be in the finalized index.
for i := uint64(0); i < slotsPerEpoch*2; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
}
for i := slotsPerEpoch + 1; i < uint64(len(blks)); i++ {
root, err = blks[i].Block().HashTreeRoot()
for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized, but should not have", i)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized in the index, but should not have", i)
}
}
func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -61,114 +69,136 @@ func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}
// This test scenario is to test a specific edge case where the finalized block root is not part of
// the finalized and canonical chain.
//
// Example:
//  0    1    2    3    4    5    6    slot
//  a <- b <-- d <- e <- f <- g         roots
//       ^- c
//
// Imagine that epochs are 2 slots and that epochs 1, 2, and 3 are finalized. Checkpoint roots would
// be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so
// it should not be considered "final and canonical" in the view at slot 6.
func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
blocks1 := append(
makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in epoch.
makeBlocks(t, slotsPerEpoch*1+1, slotsPerEpoch-1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1])))...,
)
blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1])))
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
require.NoError(t, db.SaveBlocks(ctx, blocks0))
require.NoError(t, db.SaveBlocks(ctx, blocks1))
require.NoError(t, db.SaveBlocks(ctx, blocks2))
// First checkpoint
checkpoint1 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks1[0]),
Epoch: 1,
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint1))
// All blocks in blocks0 and blocks1 should be finalized and canonical.
for i, block := range append(blocks0, blocks1...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// Second checkpoint
checkpoint2 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks2[0]),
Epoch: 2,
}
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint2.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint2))
// All blocks in blocks0 and blocks2 should be finalized and canonical.
for i, block := range append(blocks0, blocks2...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// All blocks in blocks1 should be finalized and canonical, except blocks1[0].
for i, block := range blocks1 {
root := sszRootOrDie(t, block)
if db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)) == (i == 0) {
t.Errorf("Expected db.IsFinalizedBlock(ctx, blocks1[%d]) to be %v", i, i != 0)
}
}
}
func TestStore_IsFinalizedChildBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
assert.Equal(t, false, blk == nil, "Child block at index %d was not considered finalized", i)
}
}
func TestStore_ChildRootOfPrevFinalizedCheckpointIsUpdated(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
root2, err := blks[slotsPerEpoch*2].Block().HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{
Epoch: 2,
Root: root2[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
container := &ethpb.FinalizedBlockRootContainer{}
f := tx.Bucket(finalizedBlockRootsIndexBucket).Get(root[:])
require.NoError(t, decode(ctx, f, container))
r, err := blks[slotsPerEpoch+1].Block().HashTreeRoot()
st, err := util.NewBeaconState()
require.NoError(t, err)
assert.DeepEqual(t, r[:], container.ChildRoot)
return nil
}))
}
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
func TestStore_OrphanedBlockIsNotFinalized(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blk0 := util.NewBeaconBlock()
blk0.Block.ParentRoot = genesisBlockRoot[:]
blk0Root, err := blk0.Block.HashTreeRoot()
require.NoError(t, err)
blk1 := util.NewBeaconBlock()
blk1.Block.Slot = 1
blk1.Block.ParentRoot = blk0Root[:]
blk2 := util.NewBeaconBlock()
blk2.Block.Slot = 2
// orphan block at index 1
blk2.Block.ParentRoot = blk0Root[:]
blk2Root, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
sBlk0, err := consensusblocks.NewSignedBeaconBlock(blk0)
require.NoError(t, err)
sBlk1, err := consensusblocks.NewSignedBeaconBlock(blk1)
require.NoError(t, err)
sBlk2, err := consensusblocks.NewSignedBeaconBlock(blk2)
require.NoError(t, err)
blks := append([]interfaces.ReadOnlySignedBeaconBlock{sBlk0, sBlk1, sBlk2}, makeBlocks(t, 3, slotsPerEpoch*2-3, blk2Root)...)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
if i == 1 {
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index 1 was considered finalized, but should not have")
} else {
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
// All blocks up to slotsPerEpoch should have a finalized child block.
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
if blk == nil {
t.Error("Child block doesn't exist for valid finalized block.")
}
}
}
setup := func(t testing.TB) *Store {
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
return db
}
t.Run("phase0", func(t *testing.T) {
db := setup(t)
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
t.Run("altair", func(t *testing.T) {
db := setup(t)
blks := makeBlocksAltair(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
}
func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
root, err := block.Block().HashTreeRoot()
require.NoError(t, err)
return root[:]
}
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
@@ -189,48 +219,22 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.R
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)
// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]
// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]
// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
for j := startIdx; j < num+startIdx; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])
blocks[j-startIdx] = util.NewBeaconBlockAltair()
blocks[j-startIdx].Block.Slot = primitives.Slot(j + 1)
blocks[j-startIdx].Block.ParentRoot = parentRoot
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = consensusblocks.NewSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))
// make sure that we still correctly validate descendants in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)
// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
@@ -248,23 +252,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds

View File

@@ -100,24 +100,37 @@ func StoreDatafilePath(dirPath string) string {
}
var Buckets = [][]byte{
attestationsBucket,
blocksBucket,
stateBucket,
proposerSlashingsBucket,
attesterSlashingsBucket,
voluntaryExitsBucket,
chainMetadataBucket,
checkpointBucket,
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,
attestationSourceEpochIndicesBucket,
attestationTargetRootIndicesBucket,
attestationTargetEpochIndicesBucket,
blockSlotIndicesBucket,
stateSlotIndicesBucket,
blockParentRootIndicesBucket,
finalizedBlockRootsIndexBucket,
blockRootValidatorHashesBucket,
// State management service bucket.
newStateServiceCompatibleBucket,
// Migrations
migrationsBucket,
feeRecipientBucket,
registrationBucket,
blobsBucket,
}
// KVStoreOption is a functional option that modifies a kv.Store.
@@ -137,7 +150,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
}
}
datafile := StoreDatafilePath(dirPath)
log.WithField("path", datafile).Info("Opening Bolt DB")
log.Infof("Opening Bolt DB at %s", datafile)
boltDB, err := bolt.Open(
datafile,
params.BeaconIoConfig().ReadWritePermissions,

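Buckets above enumerates every bucket the store needs, and NewKVStore opens the bbolt file; presumably the buckets are created up front in a single transaction. The general shape of that setup with the bbolt API looks roughly like this (generic sketch, not Prysm's store):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

var buckets = [][]byte{
	[]byte("blocks"),
	[]byte("state"),
	[]byte("finalized-block-roots-index"),
}

func main() {
	dir, err := os.MkdirTemp("", "kvstore")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "beaconchain.db"), 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create every bucket up front so later transactions can assume they exist.
	err = db.Update(func(tx *bolt.Tx) error {
		for _, b := range buckets {
			if _, err := tx.CreateBucketIfNotExists(b); err != nil {
				return err
			}
		}
		return nil
	})
	fmt.Println(err) // <nil>
}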
View File

@@ -7,15 +7,20 @@ package kv
// it easy to scan for keys that have a certain shard number as a prefix and return those
// corresponding attestations.
var (
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
attestationsBucket = []byte("attestations")
blobsBucket = []byte("blobs")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
proposerSlashingsBucket = []byte("proposer-slashings")
attesterSlashingsBucket = []byte("attester-slashings")
voluntaryExitsBucket = []byte("voluntary-exits")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -23,11 +28,16 @@ var (
archivedRootBucket = []byte("archived-index-root")
// Key indices buckets.
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
attestationHeadBlockRootBucket = []byte("attestation-head-block-root-indices")
attestationSourceRootIndicesBucket = []byte("attestation-source-root-indices")
attestationSourceEpochIndicesBucket = []byte("attestation-source-epoch-indices")
attestationTargetRootIndicesBucket = []byte("attestation-target-root-indices")
attestationTargetEpochIndicesBucket = []byte("attestation-target-epoch-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
// Specific item keys.
headBlockRootKey = []byte("head-root")
@@ -59,6 +69,9 @@ var (
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
savedStateSlotsKey = []byte("saved-state-slots")
// New state management service compatibility bucket.
newStateServiceCompatibleBucket = []byte("new-state-compatible")
// Migrations
migrationsBucket = []byte("migrations")
)

View File

@@ -458,7 +458,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(stateBucket)
// Safeguard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteFinalized
return ErrDeleteJustifiedAndFinalized
}
// Nothing to delete if state doesn't exist.

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/sirupsen/logrus"
)
// SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader
@@ -28,11 +27,7 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
}
log.WithFields(logrus.Fields{
"configName": cf.Config.ConfigName,
"forkName": version.String(cf.Fork),
}).Info("Detected supported config for state & block version")
log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
state, err := cf.UnmarshalBeaconState(serState)
if err != nil {
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
@@ -62,13 +57,13 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}
// save state
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}

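Several hunks in this change move between printf-style log lines and structured fields; the structured form keeps the message constant and puts the variables into key/value pairs that log aggregators can filter on. A minimal logrus example of both styles (generic, not tied to Prysm's logger setup):

package main

import log "github.com/sirupsen/logrus"

func main() {
	root := [32]byte{0xde, 0xad}

	// printf style: the variable is baked into the message string.
	log.Infof("saving checkpoint block to db, w/ root=%#x", root)

	// structured style: the message stays constant and the root becomes a field.
	log.WithFields(log.Fields{
		"root": root,
	}).Info("Saving checkpoint block to db")
}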
View File

@@ -22,14 +22,7 @@ func Restore(cliCtx *cli.Context) error {
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)
restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
restoreFile := path.Join(restoreDir, kv.DatabaseFileName)
dbExists, err := file.Exists(restoreFile, file.Regular)
if err != nil {
return errors.Wrapf(err, "could not check if database exists in %s", restoreFile)
}
if dbExists {
if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
resp, err := prompt.ValidatePrompt(
os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
)

View File

@@ -48,6 +48,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -23,10 +23,6 @@ import (
const (
attestationRecordKeySize = 32 // Bytes.
rootSize = 32 // Bytes.
// For database performance reasons, database read/write operations
// are chunked into batches of maximum `batchSize` elements.
batchSize = 10_000
)
// LastEpochWrittenForValidators given a list of validator indices returns the latest
@@ -263,23 +259,14 @@ func (s *Store) AttestationRecordForValidator(
// then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attWrappers []*slashertypes.IndexedAttestationWrapper,
attestations []*slashertypes.IndexedAttestationWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))
attWrappersCount := len(attWrappers)
// If no attestations are provided, skip.
if attWrappersCount == 0 {
return nil
}
// Build encoded target epochs and encoded records
encodedTargetEpoch := make([][]byte, attWrappersCount)
encodedRecords := make([][]byte, attWrappersCount)
for i, attestation := range attWrappers {
for i, attestation := range attestations {
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)
value, err := encodeAttestationRecord(attestation)
@@ -291,115 +278,60 @@ func (s *Store) SaveAttestationRecordsForValidators(
encodedRecords[i] = value
}
// Save attestation records in the database by batch.
for stop := attWrappersCount; stop >= 0; stop -= batchSize {
start := max(0, stop-batchSize)
return s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
attWrappersBatch := attWrappers[start:stop]
encodedTargetEpochBatch := encodedTargetEpoch[start:stop]
encodedRecordsBatch := encodedRecords[start:stop]
for i := len(attestations) - 1; i >= 0; i-- {
attestation := attestations[i]
// Perform basic check.
if len(encodedTargetEpochBatch) != len(encodedRecordsBatch) {
return fmt.Errorf(
"cannot save attestation records, got %d target epochs and %d records",
len(encodedTargetEpochBatch), len(encodedRecordsBatch),
)
}
currentBatchSize := len(encodedTargetEpochBatch)
// Save attestation records in the database.
if err := s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
for i := currentBatchSize - 1; i >= 0; i-- {
attWrapper := attWrappersBatch[i]
dataRoot := attWrapper.DataRoot
encodedTargetEpoch := encodedTargetEpochBatch[i]
encodedRecord := encodedRecordsBatch[i]
if err := attRecordsBkt.Put(dataRoot[:], encodedRecord); err != nil {
return err
}
for _, validatorIndex := range attWrapper.IndexedAttestation.AttestingIndices {
encodedIndex := encodeValidatorIndex(primitives.ValidatorIndex(validatorIndex))
key := append(encodedTargetEpoch, encodedIndex...)
if err := dataRootsBkt.Put(key, dataRoot[:]); err != nil {
return err
}
}
if err := attRecordsBkt.Put(attestation.DataRoot[:], encodedRecords[i]); err != nil {
return err
}
return nil
}); err != nil {
return errors.Wrap(err, "failed to save attestation records")
}
}
for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
return nil
key := append(encodedTargetEpoch[i], encIdx...)
if err := dataRootsBkt.Put(key, attestation.DataRoot[:]); err != nil {
return err
}
}
}
return nil
})
}
// LoadSlasherChunks given a chunk kind and a disk keys, retrieves chunks for a validator
// min or max span used by slasher from our database.
func (s *Store) LoadSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte,
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
) ([][]uint16, []bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
defer span.End()
keysCount := len(chunkKeys)
chunks := make([][]uint16, 0, keysCount)
exists := make([]bool, 0, keysCount)
encodedKeys := make([][]byte, 0, keysCount)
// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
// Encode keys.
for _, chunkKey := range chunkKeys {
encodedKey := append(encodedKind, chunkKey...)
encodedKeys = append(encodedKeys, encodedKey)
}
// Read chunks from the database by batch.
for start := 0; start < keysCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for _, encodedKey := range encodedKeysBatch {
chunkBytes := bkt.Get(encodedKey)
if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue
}
chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}
chunks = append(chunks, chunk)
exists = append(exists, true)
chunks := make([][]uint16, 0)
var exists []bool
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for _, diskKey := range diskKeys {
key := append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), diskKey...)
chunkBytes := bkt.Get(key)
if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue
}
return nil
}); err != nil {
return nil, nil, err
chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}
chunks = append(chunks, chunk)
exists = append(exists, true)
}
}
return chunks, exists, nil
return nil
})
return chunks, exists, err
}
// SaveSlasherChunks given a chunk kind, list of disk keys, and list of chunks,
@@ -409,60 +341,25 @@ func (s *Store) SaveSlasherChunks(
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
defer span.End()
// Ensure we have the same number of keys and chunks.
if len(chunkKeys) != len(chunks) {
return fmt.Errorf(
"cannot save slasher chunks, got %d keys and %d chunks",
len(chunkKeys), len(chunks),
)
}
chunksCount := len(chunks)
// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
// Encode keys and chunks.
encodedKeys := make([][]byte, chunksCount)
encodedChunks := make([][]byte, chunksCount)
for i := 0; i < chunksCount; i++ {
chunkKey, chunk := chunkKeys[i], chunks[i]
encodedKey := append(encodedKind, chunkKey...)
encodedChunk, err := encodeSlasherChunk(chunk)
encodedKeys := make([][]byte, len(chunkKeys))
encodedChunks := make([][]byte, len(chunkKeys))
for i := 0; i < len(chunkKeys); i++ {
encodedKeys[i] = append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), chunkKeys[i]...)
encodedChunk, err := encodeSlasherChunk(chunks[i])
if err != nil {
return errors.Wrapf(err, "failed to encode slasher chunk for key %v", chunkKey)
return err
}
encodedKeys[i] = encodedKey
encodedChunks[i] = encodedChunk
}
// Save chunks in the database by batch.
for start := 0; start < chunksCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]
encodedChunksBatch := encodedChunks[start:stop]
batchSize := len(encodedKeysBatch)
if err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for i := 0; i < batchSize; i++ {
if err := bkt.Put(encodedKeysBatch[i], encodedChunksBatch[i]); err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for i := 0; i < len(chunkKeys); i++ {
if err := bkt.Put(encodedKeys[i], encodedChunks[i]); err != nil {
return err
}
return nil
}); err != nil {
return errors.Wrap(err, "failed to save slasher chunks")
}
}
return nil
return nil
})
}
// CheckDoubleBlockProposals takes in a list of proposals and for each,

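One side of this diff chunks the slasher reads and writes into windows of at most batchSize elements (10_000 in the constant above) so that no single bolt transaction has to carry every record. The windowing itself is plain slice arithmetic; a standalone sketch:

package main

import "fmt"

const batchSize = 3 // the constant above uses 10_000

// processInBatches hands out consecutive windows of at most batchSize items;
// each call to handle would map to one db.Update or db.View transaction.
func processInBatches(items []string, handle func(batch []string) error) error {
	for start := 0; start < len(items); start += batchSize {
		stop := min(start+batchSize, len(items))
		if err := handle(items[start:stop]); err != nil {
			return err
		}
	}
	return nil
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	items := []string{"a", "b", "c", "d", "e", "f", "g"}
	_ = processInBatches(items, func(batch []string) error {
		fmt.Println(batch)
		return nil
	})
}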
View File

@@ -14,56 +14,33 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
const attestationsCount = 11_000
// Create context.
ctx := context.Background()
// Create database.
beaconDB := setupDB(t)
// Define the validator index.
validatorIndex := primitives.ValidatorIndex(1)
// Defines attestations to save and retrieve.
attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
for i := 0; i < attestationsCount; i++ {
var dataRoot [32]byte
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))
attWrapper := createAttestationWrapper(
primitives.Epoch(i),
primitives.Epoch(i+1),
[]uint64{uint64(validatorIndex)},
dataRoot[:],
)
attWrappers[i] = attWrapper
}
// Check on a sample of validators that no attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
}
// Save the attestation records to the database.
err := beaconDB.SaveAttestationRecordsForValidators(ctx, attWrappers)
valIdx := primitives.ValidatorIndex(1)
target := primitives.Epoch(5)
source := primitives.Epoch(4)
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
// Check on a sample of validators that attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
expected := attWrappers[i]
actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)
require.DeepEqual(t, expected.IndexedAttestation.Data.Source.Epoch, actual.IndexedAttestation.Data.Source.Epoch)
}
sr := [32]byte{1}
err = beaconDB.SaveAttestationRecordsForValidators(
ctx,
[]*slashertypes.IndexedAttestationWrapper{
createAttestationWrapper(source, target, []uint64{uint64(valIdx)}, sr[:]),
},
)
require.NoError(t, err)
attRecord, err = beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
assert.DeepEqual(t, target, attRecord.IndexedAttestation.Data.Target.Epoch)
assert.DeepEqual(t, source, attRecord.IndexedAttestation.Data.Source.Epoch)
assert.DeepEqual(t, sr, attRecord.DataRoot)
}
func TestStore_LastEpochWrittenForValidators(t *testing.T) {
@@ -161,113 +138,61 @@ func TestStore_CheckAttesterDoubleVotes(t *testing.T) {
}
func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
// Define test parameters.
const (
elemsPerChunk = 16
totalChunks = 11_000
)
// Create context.
ctx := context.Background()
// Create database.
beaconDB := setupDB(t)
// Create min chunk keys and chunks.
minChunkKeys := make([][]byte, totalChunks)
minChunks := make([][]uint16, totalChunks)
elemsPerChunk := 16
totalChunks := 64
chunkKeys := make([][]byte, totalChunks)
chunks := make([][]uint16, totalChunks)
for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i))
minChunkKeys[i] = chunkKey
// Create chunk.
chunk := make([]uint16, elemsPerChunk)
for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(i + j)
chunk[j] = uint16(0)
}
minChunks[i] = chunk
chunks[i] = chunk
chunkKeys[i] = ssz.MarshalUint64(make([]byte, 0), uint64(i))
}
// Create max chunk keys and chunks.
maxChunkKeys := make([][]byte, totalChunks)
maxChunks := make([][]uint16, totalChunks)
for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i+1))
maxChunkKeys[i] = chunkKey
// Create chunk.
chunk := make([]uint16, elemsPerChunk)
for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(i + j + 1)
}
maxChunks[i] = chunk
}
// Save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, minChunkKeys, minChunks)
// We save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, chunkKeys, chunks)
require.NoError(t, err)
// Expect no chunks to be stored for max spans.
// We expect no chunks to be stored for max spans.
_, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, minChunkKeys,
ctx, slashertypes.MaxSpan, chunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(minChunks), len(chunksExist))
require.Equal(t, len(chunks), len(chunksExist))
for _, exists := range chunksExist {
require.Equal(t, false, exists)
}
// Check the right chunks are saved.
// We check we saved the right chunks.
retrievedChunks, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, minChunkKeys,
ctx, slashertypes.MinSpan, chunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
require.DeepEqual(t, chunks[i], retrievedChunks[i])
}
// Save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, maxChunkKeys, maxChunks)
// We save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, chunkKeys, chunks)
require.NoError(t, err)
// Check the right chunks are saved.
// We check we saved the right chunks.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, maxChunkKeys,
ctx, slashertypes.MaxSpan, chunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(maxChunks), len(retrievedChunks))
require.Equal(t, len(maxChunks), len(chunksExist))
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, maxChunks[i], retrievedChunks[i])
}
// Check the right chunks are still saved for min span.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
require.DeepEqual(t, chunks[i], retrievedChunks[i])
}
}
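The rewritten test saves min-span and max-span chunks under distinct key sets and asserts that writes of one kind never become visible under the other. A toy in-memory stand-in for that bucketed layout (assumed semantics only; the real store is persisted in bolt):

package main

import "fmt"

// kind mirrors slashertypes.MinSpan / MaxSpan as a simple enum.
type kind int

const (
	minSpan kind = iota
	maxSpan
)

// chunkStore is a toy stand-in for the slasher DB: chunks are bucketed per
// kind, so min-span and max-span keys never collide.
type chunkStore struct {
	buckets map[kind]map[string][]uint16
}

func newChunkStore() *chunkStore {
	return &chunkStore{buckets: map[kind]map[string][]uint16{
		minSpan: {},
		maxSpan: {},
	}}
}

func (s *chunkStore) save(k kind, keys [][]byte, chunks [][]uint16) {
	for i, key := range keys {
		s.buckets[k][string(key)] = chunks[i]
	}
}

// load returns, for each key, the stored chunk (if any) and whether it exists,
// mirroring LoadSlasherChunks' (chunks, exists, err) shape without the error.
func (s *chunkStore) load(k kind, keys [][]byte) ([][]uint16, []bool) {
	out := make([][]uint16, len(keys))
	exists := make([]bool, len(keys))
	for i, key := range keys {
		c, ok := s.buckets[k][string(key)]
		out[i], exists[i] = c, ok
	}
	return out, exists
}

func main() {
	s := newChunkStore()
	keys := [][]byte{{0}, {1}}
	s.save(minSpan, keys, [][]uint16{{1, 2}, {3, 4}})

	_, exists := s.load(maxSpan, keys)
	fmt.Println(exists) // [false false]: the max-span bucket is untouched

	chunks, exists := s.load(minSpan, keys)
	fmt.Println(exists, chunks) // [true true] [[1 2] [3 4]]
}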

View File

@@ -110,7 +110,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return nil, errors.Wrap(errBlockTimeTooLate, fmt.Sprintf("(%d > %d)", time, latestBlkTime))
}
// Initialize a cursor into the eth1 chain's history to start our search from.
cursorNum := new(big.Int).SetUint64(latestBlkHeight)
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
var numOfBlocks uint64
@@ -156,15 +156,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findMaxTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
return s.findMinTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = new(big.Int).Sub(bn, big.NewInt(1)) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -181,7 +181,7 @@ func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big
// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = new(big.Int).Add(bn, big.NewInt(1)) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -201,7 +201,7 @@ func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big
}
func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
bn := new(big.Int).SetUint64(bNum)
bn := big.NewInt(0).SetUint64(bNum)
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
if err != nil {
return nil, err
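For context, BlockByTimestamp estimates a block height from the head timestamp and then hands off to findMaxTargetEth1Block (walk down) or findMinTargetEth1Block (walk up) until the target time is bracketed. A toy version of that strategy over a synthetic chain with roughly regular block times, for illustration only:

package main

import "fmt"

// findByTimestamp sketches the search above on a toy chain where timestamps[i]
// is the time of block i: jump to an estimated height, then walk down or up
// until the target time is bracketed. Illustrative simplification only.
func findByTimestamp(timestamps []uint64, target uint64, avgBlockTime uint64) int {
	head := len(timestamps) - 1
	if target >= timestamps[head] {
		return head
	}
	// Estimate how many blocks to step back from head.
	back := (timestamps[head] - target) / avgBlockTime
	est := head - int(back)
	if est < 0 {
		est = 0
	}
	if timestamps[est] > target {
		// Walk down to the newest block at or before target.
		for est > 0 && timestamps[est] > target {
			est--
		}
		return est
	}
	// Walk up to the last block still at or before target.
	for est+1 <= head && timestamps[est+1] <= target {
		est++
	}
	return est
}

func main() {
	ts := []uint64{0, 12, 24, 36, 50, 61, 75, 90}
	fmt.Println(findByTimestamp(ts, 55, 12)) // 4 (block with time 50)
}

Note that new(big.Int).SetUint64(x) and big.NewInt(0).SetUint64(x), seen throughout this file, produce equivalent values; the change between them is purely stylistic.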

View File

@@ -23,6 +23,9 @@ var (
ErrInvalidPayloadAttributes = errors.New("payload attributes are invalid / inconsistent")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")
// ErrAcceptedSyncingPayloadStatus when the status of the payload is syncing or accepted.
ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
// ErrInvalidPayloadStatus when the status of the payload is invalid.
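ErrConfigMismatch is a sentinel value, so callers can wrap it with extra context and still detect it later. A stdlib-only sketch of that pattern (checkTTD is a hypothetical caller, not part of Prysm's API):

package main

import (
	"errors"
	"fmt"
)

// errConfigMismatch stands in for the ErrConfigMismatch sentinel above.
var errConfigMismatch = errors.New("execution client configuration mismatch")

func checkTTD(local, remote string) error {
	if local != remote {
		return fmt.Errorf("terminal total difficulty %s does not match configured %s: %w", remote, local, errConfigMismatch)
	}
	return nil
}

func main() {
	err := checkTTD("58750000000000000000000", "0")
	fmt.Println(errors.Is(err, errConfigMismatch)) // true
}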

View File

@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
}
log.WithFields(logrus.Fields{
"chainStartTime": chainStartTime,
"ChainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,
@@ -298,7 +298,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// Start from the deployment block if our last requested block
// is behind it, since deposit logs can only exist from the block
// at which the deposit contract was deployed.
currentBlockNum = max(currentBlockNum, deploymentBlock)
if deploymentBlock > currentBlockNum {
currentBlockNum = deploymentBlock
}
// To store all blocks.
headersMap := make(map[uint64]*types.HeaderInfo)
rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
@@ -382,13 +384,15 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
end := currentBlockNum + batchSize
// Appropriately bound the request, as we do not
// want to request blocks beyond the current follow distance.
end = min(end, latestFollowHeight)
if end > latestFollowHeight {
end = latestFollowHeight
}
query := ethereum.FilterQuery{
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: new(big.Int).SetUint64(start),
ToBlock: new(big.Int).SetUint64(end),
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -396,7 +400,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = new(big.Int).SetUint64(latestFollowHeight)
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -478,11 +482,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, new(big.Int).SetUint64(i))
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, new(big.Int).SetUint64(i))
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
if err != nil {
return err
}
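One side of these hunks spells out the bound checks with explicit if-comparisons, while the other clamps the block range with Go's built-in generic min and max (available since Go 1.21). A short illustration:

package main

import "fmt"

func main() {
	// Go 1.21+ built-ins min and max clamp a range in a single expression.
	currentBlockNum, deploymentBlock := uint64(10), uint64(25)
	currentBlockNum = max(currentBlockNum, deploymentBlock) // never start before deployment
	end, latestFollowHeight := currentBlockNum+1000, uint64(600)
	end = min(end, latestFollowHeight) // never request past the follow distance
	fmt.Println(currentBlockNum, end)  // 25 600
}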

View File

@@ -77,7 +77,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
if currClient != nil {
currClient.Close()
}
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")

View File

@@ -415,11 +415,14 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
requestRange := (endBlock - startBlock) + 1
elems := make([]gethRPC.BatchElem, 0, requestRange)
headers := make([]*types.HeaderInfo, 0, requestRange)
if requestRange == 0 {
return headers, nil
}
for i := startBlock; i <= endBlock; i++ {
header := &types.HeaderInfo{}
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(new(big.Int).SetUint64(i)), false},
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Result: header,
Error: error(nil),
})
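Each batch element encodes the block number for eth_getBlockByNumber as a 0x-prefixed hex quantity via hexutil.EncodeBig. The same shape with only the standard library, for illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// eth_getBlockByNumber takes the block number as a 0x-prefixed hex quantity.
	bn := new(big.Int).SetUint64(12345)
	fmt.Printf("0x%x\n", bn) // 0x3039
}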
@@ -580,9 +583,6 @@ func (s *Service) run(done <-chan struct{}) {
s.runError = nil
s.initPOWService()
// Do not keep storing the finalized state as it is
// no longer of use.
s.removeStartupState()
chainstartTicker := time.NewTicker(logPeriod)
defer chainstartTicker.Stop()
@@ -636,7 +636,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}
fields := logrus.Fields{
"additionalValidatorsNeeded": valNeeded,
"Additional validators needed": valNeeded,
}
if secondsLeft > 0 {
fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second
@@ -672,7 +672,9 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
// the allotted limit.
endReq -= 1
}
endReq = min(endReq, end)
if endReq > end {
endReq = end
}
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err := s.batchRequestHeaders(startReq, endReq)
if err != nil {
@@ -908,7 +910,3 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
s.depositTrie = newDepositTrie
return nil
}
func (s *Service) removeStartupState() {
s.cfg.finalizedStateAtStartup = nil
}

View File

@@ -82,15 +82,6 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return
}
// Return early if we are checking before 10 seconds into the slot
secs, err := slots.SecondsSinceSlotStart(head.slot, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
}
if secs < ProcessAttestationsThreshold {
return true
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
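The remaining guard compares weights in integer arithmetic rather than converting to floating-point percentages. With made-up numbers:

package main

import "fmt"

func main() {
	// parentWeight*100 < committeeWeight*threshold is the integer form of
	// "parent weight is below threshold percent of the committee weight".
	// The values below are invented for illustration only.
	var (
		parentWeight    uint64 = 1_200
		committeeWeight uint64 = 10_000
		thresholdPct    uint64 = 160
	)
	weak := parentWeight*100 < committeeWeight*thresholdPct
	fmt.Println(weak) // true: 120000 < 1600000, so the parent vote is not strong and no reorg is attempted
}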

View File

@@ -85,19 +85,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent = saved
})
t.Run("parent is weak early call", func(t *testing.T) {
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, true, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("parent is weak late call", func(t *testing.T) {
saved := f.store.headNode.parent.weight
driftGenesisTime(f, 2, 11)
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight

View File

@@ -11,6 +11,7 @@ import (
// MuxConfig contains configuration that should be used when registering the beacon node in the gateway.
type MuxConfig struct {
Handler gateway.MuxHandler
EthPbMux *gateway.PbMux
V1AlphaPbMux *gateway.PbMux
}

View File

@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"validatorIndex": idx,
"slot": data.Slot,
"source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalCorrectTarget++
}
}
logFields["correctHead"] = latestPerf.timelyHead
logFields["correctSource"] = latestPerf.timelySource
logFields["correctTarget"] = latestPerf.timelyTarget
logFields["inclusionSlot"] = latestPerf.inclusionSlot
logFields["newBalance"] = balance
logFields["balanceChange"] = balanceChg
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"aggregatorIndex": att.AggregatorIndex,
"slot": att.Aggregate.Data.Slot,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}
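The expected log strings in the test files that follow change field order as well as key case: logrus's text formatter sorts field keys bytewise, so capitalized keys sort ahead of the lowercase "prefix" field added by the logger. A minimal reproduction (exact output shape depends on formatter settings):

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.SetOutput(os.Stdout)
	// The text formatter sorts field keys by default, so "Slot" and
	// "ValidatorIndex" print before the lowercase "prefix" field.
	log.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true})
	log.WithFields(logrus.Fields{
		"ValidatorIndex": 2,
		"Slot":           1,
		"prefix":         "monitor",
	}).Info("Processed attestation")
}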

View File

@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
}
func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
wrappedBlock, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)

View File

@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
}
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b
parentRoot := blk.ParentRoot()
log.WithFields(logrus.Fields{
"proposerIndex": blk.ProposerIndex(),
"slot": blk.Slot(),
"version": blk.Version(),
"parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"newBalance": balance,
"balanceChange": balanceChg,
"ProposerIndex": blk.ProposerIndex(),
"Slot": blk.Slot(),
"Version": blk.Version(),
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"NewBalance": balance,
"BalanceChange": balanceChg,
}).Info("Proposed beacon block was included")
}
}
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"proposerIndex": idx,
"slot": blk.Slot(),
"slashingSlot": slashing.Header_1.Header.Slot,
"bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
"ProposerIndex": idx,
"Slot": blk.Slot(),
"SlashingSlot": slashing.Header_1.Header.Slot,
"BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"attesterIndex": idx,
"blockInclusionSlot": blk.Slot(),
"attestationSlot1": slashing.Attestation_1.Data.Slot,
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"attestationSlot2": slashing.Attestation_2.Data.Slot,
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
"AttesterIndex": idx,
"BlockInclusionSlot": blk.Slot(),
"AttestationSlot1": slashing.Attestation_1.Data.Slot,
"BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"AttestationSlot2": slashing.Attestation_2.Data.Slot,
"BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)
log.WithFields(logrus.Fields{
"validatorIndex": idx,
"startEpoch": p.startEpoch,
"startBalance": p.startBalance,
"totalRequested": p.totalRequestedCount,
"attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"totalProposedBlocks": p.totalProposedCount,
"totalAggregations": p.totalAggregations,
"totalSyncContributions": p.totalSyncCommitteeContributions,
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}

View File

@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
},
{
name: "Proposer slashing an untracked index",
@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
"beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
"BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
},
{
name: "Attester slashing untracked index",
@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
Body: &ethpb.BeaconBlockBody{},
},
wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
},
{
name: "Block proposed by untracked validator",
@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
wrapped, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s.processBlock(ctx, wrapped)
@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
}
s.logAggregatedPerformance()
wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
" averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
"correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
"startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
"validatorIndex=1"
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
"ValidatorIndex=1 prefix=monitor"
require.LogsContain(t, hook, wanted)
}

View File

@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
idx := exit.Exit.ValidatorIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"validatorIndex": idx,
"slot": blk.Slot(),
"ValidatorIndex": idx,
"Slot": blk.Slot(),
}).Info("Voluntary exit was included")
}
}
@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
defer s.RUnlock()
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"validatorIndex": idx,
"ValidatorIndex": idx,
}).Info("Voluntary exit was processed")
}
}

View File

@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
wb, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processExitsFromBlock(wb)
require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
}
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
Signature: make([]byte, 96),
}
s.processExit(exit)
require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
}
func TestProcessExitP2PUntrackedIndices(t *testing.T) {

View File

@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf
log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}
@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
log.WithFields(logrus.Fields{
"validatorIndex": validatorIdx,
"expectedContribCount": len(committeeIndices),
"contribCount": contrib,
"newBalance": balance,
"balanceChange": balanceChg,
"ValidatorIndex": validatorIdx,
"ExpectedContribCount": len(committeeIndices),
"ContribCount": contrib,
"NewBalance": balance,
"BalanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}

View File

@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
}
s.processSyncCommitteeContribution(contrib)
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}
func TestProcessSyncAggregate(t *testing.T) {
@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
require.NoError(t, err)
s.processSyncAggregate(beaconState, wrappedBlock)
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}

View File

@@ -111,7 +111,7 @@ func (s *Service) Start() {
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
log.WithFields(logrus.Fields{
"validatorIndices": tracked,
"ValidatorIndices": tracked,
}).Info("Starting service")
go s.run()
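Sorting the tracked indices before logging keeps the validator-indices field deterministic across restarts, e.g.:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Sort tracked indices so the logged field is stable run to run.
	tracked := []uint64{15, 2, 12, 1}
	sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
	fmt.Println(tracked) // [1 2 12 15]
}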
@@ -134,7 +134,7 @@ func (s *Service) run() {
}
epoch := slots.ToEpoch(st.Slot())
log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")
log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
s.Lock()
s.initializePerformanceStructures(st, epoch)
@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
log.WithError(err).WithField("validatorIndex", idx).Error(
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
log.WithError(err).WithField("validatorIndex", idx).Error(
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {

View File

@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
s.Lock()
require.Equal(t, s.isLogging, true, "monitor is not running")
s.Unlock()
@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)
}

View File

@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
c := params.BeaconConfig()
c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
params.OverrideBeaconConfig(c)
}

View File

@@ -7,11 +7,9 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -120,7 +118,6 @@ type BeaconNode struct {
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
@@ -129,16 +126,51 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
if err := configureTracing(cliCtx); err != nil {
return nil, err
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -158,10 +190,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
}
beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -170,46 +202,119 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
beacon.forkChoicer = doublylinkedtree.New()
depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
}
// Allow tests to set it as an opt.
if beacon.BlobStorage == nil {
beacon.BlobStorageOptions = append(beacon.BlobStorageOptions, filesystem.WithSaveFsync(features.Get().BlobSaveFsync))
blobs, err := filesystem.NewBlobStorage(beacon.BlobStorageOptions...)
if err != nil {
return nil, err
}
beacon.BlobStorage = blobs
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
return nil, errors.Wrap(err, "backfill status initialization error")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
}
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
}
// db.DatabasePath is the path to the containing directory
@@ -221,176 +326,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.collector = c
// Do not store the finalized state as it has been provided to the respective services during
// their initialization.
beacon.finalizedStateAtStartUp = nil
return beacon, nil
}
func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}
if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}
if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}
if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}
return nil
}
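The extracted registerServices helper wraps every failure with the name of the stage that failed, so a startup error names its source. A compact sketch of that wrap-per-stage pattern with github.com/pkg/errors (the step names below are invented):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// registerAll sketches the pattern: every registration step returns a wrapped
// error so the failing stage is named in the final message.
func registerAll() error {
	steps := []struct {
		name string
		fn   func() error
	}{
		{"attestation pool service", func() error { return nil }},
		{"blockchain service", func() error { return errors.New("db locked") }},
	}
	for _, s := range steps {
		if err := s.fn(); err != nil {
			return errors.Wrapf(err, "could not register %s", s.name)
		}
	}
	return nil
}

func main() {
	if err := registerAll(); err != nil {
		fmt.Println(err) // could not register blockchain service: db locked
	}
}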
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -479,86 +416,40 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
log.WithField("database-path", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return err
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
if err := d.RunMigrations(b.ctx); err != nil {
@@ -567,6 +458,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
b.db = d
var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
@@ -581,17 +473,16 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
return err
}
if b.CheckpointInitializer != nil {
@@ -600,11 +491,23 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
}
log.WithField("address", depositAddress).Info("Deposit contract")
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())
return nil
}
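The second half of this hunk compares the deposit contract address stored in the database with the one the node was started with, refusing to run against a database created for a different network. A stripped-down sketch of that consistency check (the helper name and addresses below are illustrative):

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// checkContract fails when a stored address exists and differs from the configured one.
// An empty stored value means first run, so the caller would persist the configured address.
func checkContract(stored, configured []byte) error {
	if len(stored) == 0 {
		return nil
	}
	if !bytes.Equal(stored, configured) {
		return fmt.Errorf("database contract is %#x but node was started with %#x; "+
			"clear the database or point --datadir at a different directory", stored, configured)
	}
	return nil
}

func main() {
	stored, _ := hex.DecodeString("1111111111111111111111111111111111111111")
	configured, _ := hex.DecodeString("2222222222222222222222222222222222222222")
	fmt.Println(checkContract(stored, configured)) // prints the mismatch error
}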
@@ -622,7 +525,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
log.WithField("database-path", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
@@ -692,31 +595,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return errors.Wrapf(err, "could not register p2p service")
return err
}
svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -1058,13 +961,11 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)
selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
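The two address-building styles in this hunk are not equivalent when the host may be IPv6: fmt.Sprintf produces an ambiguous host:port string, while net.JoinHostPort adds the required brackets. A quick illustration:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	host, port := "::1", 3500
	fmt.Printf("%s:%d\n", host, port)                       // "::1:3500"  - port boundary is ambiguous
	fmt.Println(net.JoinHostPort(host, strconv.Itoa(port))) // "[::1]:3500" - brackets added for IPv6
}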
@@ -1086,6 +987,7 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
apigateway.WithGatewayAddr(gatewayAddress),
apigateway.WithRemoteAddr(selfAddress),
apigateway.WithPbHandlers(muxs),
apigateway.WithMuxHandler(gatewayConfig.Handler),
apigateway.WithRemoteCert(selfCert),
apigateway.WithMaxCallRecvMsgSize(maxCallSize),
apigateway.WithAllowedOrigins(allowedOrigins),
@@ -1158,9 +1060,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
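Both spellings of the builder-options append behave the same way; the subtlety reviewers usually watch for with either form is that appending to a slice with spare capacity can alias its backing array. A small demonstration of that gotcha (unrelated to Prysm's types):

package main

import "fmt"

func main() {
	base := make([]int, 2, 4) // length 2, spare capacity 2
	a := append(base, 10)     // writes into base's backing array
	b := append(base, 20)     // reuses the same backing array, overwriting a's element
	fmt.Println(a[2], b[2])   // prints "20 20"
}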

View File

@@ -43,15 +43,6 @@ func WithBlobStorage(bs *filesystem.BlobStorage) Option {
}
}
// WithBlobStorageOptions appends one or more filesystem.BlobStorageOption to the beacon node,
// to be used when initializing blob storage.
func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return func(bn *BeaconNode) error {
bn.BlobStorageOptions = append(bn.BlobStorageOptions, opt...)
return nil
}
}
// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
return func(bn *BeaconNode) error {

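These With... constructors follow Go's functional-options pattern: each returns a closure that mutates the node during construction. A self-contained sketch of the pattern (the node struct and option names below are illustrative stand-ins, not Prysm's real types):

package main

import "fmt"

type node struct {
	retentionEpochs uint64
	blobOpts        []string
}

type option func(*node) error

func withRetentionEpochs(e uint64) option {
	return func(n *node) error {
		n.retentionEpochs = e
		return nil
	}
}

func withBlobOptions(opts ...string) option {
	return func(n *node) error {
		n.blobOpts = append(n.blobOpts, opts...)
		return nil
	}
}

func newNode(opts ...option) (*node, error) {
	n := &node{}
	for _, o := range opts {
		if err := o(n); err != nil {
			return nil, err
		}
	}
	return n, nil
}

func main() {
	n, err := newNode(withRetentionEpochs(4096), withBlobOptions("prune-on-start"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *n)
}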
View File

@@ -11,7 +11,6 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",

View File

@@ -4,7 +4,6 @@ import (
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/urfave/cli/v2"
@@ -32,9 +31,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
err = errors.Errorf(
"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
cmd.DataDirFlag.Name,
log.Fatal(
"Could not determine your system's HOME path, please specify a --datadir you wish " +
"to use for your chain data",
)
}
}
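This hunk toggles between log.Fatal and a returned error when no data directory can be resolved. A minimal sketch of the error-returning style, with a stand-in default path (the fallback location and helper name are illustrative, not Prysm's actual defaults):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveDataDir prefers the flag value and otherwise derives a default under HOME,
// returning an error instead of exiting so callers can decide how to handle it.
func resolveDataDir(flagValue string) (string, error) {
	if flagValue != "" {
		return flagValue, nil
	}
	home, err := os.UserHomeDir()
	if err != nil || home == "" {
		return "", fmt.Errorf("could not determine your system's HOME path, please specify --datadir")
	}
	return filepath.Join(home, ".eth2"), nil // illustrative default location
}

func main() {
	dir, err := resolveDataDir("")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("using data dir:", dir)
}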

View File

@@ -49,7 +49,6 @@ go_test(
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",

View File

@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneAttsPool prunes attestations pool on every slot interval.
@@ -67,18 +66,7 @@ func (s *Service) pruneExpiredAtts() {
// Return true if the input slot has expired.
// Expired is defined as one epoch behind the current time.
func (s *Service) expired(providedSlot primitives.Slot) bool {
providedEpoch := slots.ToEpoch(providedSlot)
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch < params.BeaconConfig().DenebForkEpoch {
return s.expiredPreDeneb(providedSlot)
}
return providedEpoch+1 < currEpoch
}
// Handles expiration of attestations before Deneb.
func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
func (s *Service) expired(slot primitives.Slot) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + uint64(expirationSlot.Mul(params.BeaconConfig().SecondsPerSlot))
currentTime := uint64(prysmTime.Now().Unix())
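The two expired implementations in this hunk encode different rules: one treats a slot as expired once its epoch is more than one epoch behind the current epoch, the other once a full epoch's worth of slots has elapsed since the slot itself. A simplified, slot-only comparison of the two (the constant and helpers are illustrative):

package main

import "fmt"

const slotsPerEpoch = 32 // illustrative value

// expiredByEpoch: expired when the slot's epoch is more than one epoch behind the current epoch.
func expiredByEpoch(slot, currentSlot uint64) bool {
	return slot/slotsPerEpoch+1 < currentSlot/slotsPerEpoch
}

// expiredBySlot: expired once slotsPerEpoch slots have passed since the slot itself.
func expiredBySlot(slot, currentSlot uint64) bool {
	return slot+slotsPerEpoch <= currentSlot
}

func main() {
	// Slot 5 (epoch 0) seen at slot 40 (epoch 1): not expired under the epoch rule,
	// but already expired under the slot rule.
	fmt.Println(expiredByEpoch(5, 40), expiredBySlot(5, 40)) // false true
}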

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -128,22 +127,3 @@ func TestPruneExpired_Expired(t *testing.T) {
assert.Equal(t, true, s.expired(0), "Should be expired")
assert.Equal(t, false, s.expired(1), "Should not be expired")
}
func TestPruneExpired_ExpiredDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = 3
params.OverrideBeaconConfig(cfg)
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)
// Rewind the genesis time by 4 epochs plus 10 seconds.
s.genesisTime = uint64(prysmTime.Now().Unix()) - (4*uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) + 10)
secondEpochStart := primitives.Slot(2 * uint64(params.BeaconConfig().SlotsPerEpoch))
thirdEpochStart := primitives.Slot(3 * uint64(params.BeaconConfig().SlotsPerEpoch))
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}
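The removed test above asserts that, under the epoch rule, a slot two epochs back is expired while a slot one epoch back is not. An equivalent, self-contained test against a simplified helper (constants and names again illustrative, not Prysm's):

package main

import "testing"

const slotsPerEpoch = 32 // illustrative value

func expiredByEpoch(slot, currentSlot uint64) bool {
	return slot/slotsPerEpoch+1 < currentSlot/slotsPerEpoch
}

func TestExpiredByEpoch(t *testing.T) {
	current := uint64(4 * slotsPerEpoch) // "now" is the start of epoch 4
	if !expiredByEpoch(2*slotsPerEpoch, current) {
		t.Error("slot at the start of epoch 2 should be expired at epoch 4")
	}
	if expiredByEpoch(3*slotsPerEpoch, current) {
		t.Error("slot at the start of epoch 3 should not be expired at epoch 4")
	}
}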

View File

@@ -94,7 +94,6 @@ go_library(
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",

Some files were not shown because too many files have changed in this diff.