Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: fast-confi ... develop2 (24 commits)
| Author | SHA1 | Date | |
|---|---|---|---|
| | 16dcc4bf5e | | |
| | 46afec9cd2 | | |
| | 6023a0d45f | | |
| | a8a2866798 | | |
| | 011150e93e | | |
| | 3bd52c706a | | |
| | f58019ba17 | | |
| | 230d2af015 | | |
| | 3f6c6e935f | | |
| | 1d50bc0ebf | | |
| | 281bdd84b4 | | |
| | c0368681d3 | | |
| | 7d6afc3412 | | |
| | a168bc256d | | |
| | c8187616d8 | | |
| | 4bc6df1f50 | | |
| | 643962029a | | |
| | a95f7b4867 | | |
| | 574108644c | | |
| | 670b2b8291 | | |
| | 3198d64a7c | | |
| | 26645ed724 | | |
| | ee3541534c | | |
| | 3e429b7d55 | | |
@@ -194,7 +194,6 @@ nogo(
        "//tools/analyzers/gocognit:go_default_library",
        "//tools/analyzers/ineffassign:go_default_library",
        "//tools/analyzers/interfacechecker:go_default_library",
        "//tools/analyzers/logcapitalization:go_default_library",
        "//tools/analyzers/logruswitherror:go_default_library",
        "//tools/analyzers/maligned:go_default_library",
        "//tools/analyzers/nop:go_default_library",
api/client/beacon/health/BUILD.bazel (new file, 20 lines)
@@ -0,0 +1,20 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "health.go",
        "interfaces.go",
        "mock.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
    visibility = ["//visibility:public"],
    deps = ["@org_uber_go_mock//gomock:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["health_test.go"],
    embed = [":go_default_library"],
    deps = ["@org_uber_go_mock//gomock:go_default_library"],
)
api/client/beacon/health/health.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package health

import (
	"context"
	"sync"
)

type NodeHealthTracker struct {
	isHealthy  *bool
	healthChan chan bool
	node       Node
	sync.RWMutex
}

func NewTracker(node Node) Tracker {
	return &NodeHealthTracker{
		node:       node,
		healthChan: make(chan bool, 1),
	}
}

// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
	return n.healthChan
}

func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
	n.RLock()
	defer n.RUnlock()
	if n.isHealthy == nil {
		return false
	}
	return *n.isHealthy
}

func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
	n.Lock()
	defer n.Unlock()

	newStatus := n.node.IsHealthy(ctx)
	if n.isHealthy == nil {
		n.isHealthy = &newStatus
	}

	isStatusChanged := newStatus != *n.isHealthy
	if isStatusChanged {
		// Update the health status
		n.isHealthy = &newStatus
		// Send the new status to the health channel, potentially overwriting the existing value
		select {
		case <-n.healthChan:
			n.healthChan <- newStatus
		default:
			n.healthChan <- newStatus
		}
	}
	return newStatus
}
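A minimal sketch of how a caller might drive this tracker. The `staticNode` type and the one-second polling interval are illustrative only; they are not part of the change above.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v6/api/client/beacon/health"
)

// staticNode is a stand-in for a real beacon node client; it always reports healthy.
type staticNode struct{}

func (staticNode) IsHealthy(_ context.Context) bool { return true }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	tracker := health.NewTracker(staticNode{})

	// React to status transitions published on the buffered channel.
	go func() {
		for status := range tracker.HealthUpdates() {
			fmt.Println("health changed:", status)
		}
	}()

	// Poll the underlying node once per second; CheckHealth only publishes
	// to HealthUpdates when the status actually flips.
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			tracker.CheckHealth(ctx)
		case <-ctx.Done():
			return
		}
	}
}
```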
api/client/beacon/health/health_test.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package health

import (
	"sync"
	"testing"

	"go.uber.org/mock/gomock"
)

func TestNodeHealth_IsHealthy(t *testing.T) {
	tests := []struct {
		name      string
		isHealthy bool
		want      bool
	}{
		{"initially healthy", true, true},
		{"initially unhealthy", false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := &NodeHealthTracker{
				isHealthy:  &tt.isHealthy,
				healthChan: make(chan bool, 1),
			}
			if got := n.IsHealthy(t.Context()); got != tt.want {
				t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
	tests := []struct {
		name       string
		initial    bool // Initial health status
		newStatus  bool // Status to update to
		shouldSend bool // Should a message be sent through the channel
	}{
		{"healthy to unhealthy", true, false, true},
		{"unhealthy to healthy", false, true, true},
		{"remain healthy", true, true, false},
		{"remain unhealthy", false, false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			client := NewMockHealthClient(ctrl)
			client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
			n := &NodeHealthTracker{
				isHealthy:  &tt.initial,
				node:       client,
				healthChan: make(chan bool, 1),
			}

			s := n.CheckHealth(t.Context())
			// Check if health status was updated
			if s != tt.newStatus {
				t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
			}

			select {
			case status := <-n.HealthUpdates():
				if !tt.shouldSend {
					t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
				} else if status != tt.newStatus {
					t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
				}
			default:
				if tt.shouldSend {
					t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
				}
			}
		})
	}
}

func TestNodeHealth_Concurrency(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := NewMockHealthClient(ctrl)
	n := NewTracker(client)
	var wg sync.WaitGroup

	// Number of goroutines to spawn for both reading and writing
	numGoroutines := 6

	wg.Add(numGoroutines * 2) // for readers and writers

	// Concurrently update health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
			n.CheckHealth(t.Context())
			client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
			n.CheckHealth(t.Context())
		}()
	}

	// Concurrently read health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			_ = n.IsHealthy(t.Context()) // Just read the value
		}()
	}

	wg.Wait() // Wait for all goroutines to finish
}
api/client/beacon/health/interfaces.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package health

import "context"

type Tracker interface {
	HealthUpdates() <-chan bool
	CheckHealth(ctx context.Context) bool
	Node
}

type Node interface {
	IsHealthy(ctx context.Context) bool
}
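Any type with an `IsHealthy(context.Context) bool` method satisfies `Node`. A hedged sketch of one such implementation follows; the `httpNode` type and the health-endpoint URL are assumptions for illustration, not part of this change.

```go
package health

import (
	"context"
	"net/http"
)

// httpNode is a hypothetical Node implementation that treats any 2xx answer
// from a node's health endpoint as healthy. The endpoint URL is illustrative.
type httpNode struct {
	client *http.Client
	url    string // e.g. "http://localhost:3500/eth/v1/node/health"
}

func (h *httpNode) IsHealthy(ctx context.Context) bool {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, h.url, nil)
	if err != nil {
		return false
	}
	resp, err := h.client.Do(req)
	if err != nil {
		return false
	}
	defer func() { _ = resp.Body.Close() }()
	return resp.StatusCode >= 200 && resp.StatusCode < 300
}
```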
api/client/beacon/health/mock.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package health

import (
	"context"
	"reflect"
	"sync"

	"go.uber.org/mock/gomock"
)

var (
	_ = Node(&MockHealthClient{})
)

// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
	ctrl     *gomock.Controller
	recorder *MockHealthClientMockRecorder
	sync.Mutex
}

// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
	mock *MockHealthClient
}

// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
	m.Lock()
	defer m.Unlock()
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsHealthy", arg0)
	ret0, ok := ret[0].(bool)
	if !ok {
		return false
	}
	return ret0
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
	return m.recorder
}

// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
	mr.mock.Lock()
	defer mr.mock.Unlock()
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}

// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
	mock := &MockHealthClient{ctrl: ctrl}
	mock.recorder = &MockHealthClientMockRecorder{mock}
	return mock
}
@@ -50,7 +50,6 @@ go_test(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
@@ -72,7 +72,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
		log.WithFields(log.Fields{
			"bodyBase64": "(nil value)",
			"url": r.URL.String(),
		}).Info("Builder http request")
		}).Info("builder http request")
		return nil
	}
	t := io.TeeReader(r.Body, b)

@@ -89,7 +89,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
	log.WithFields(log.Fields{
		"bodyBase64": string(body),
		"url": r.URL.String(),
	}).Info("Builder http request")
	}).Info("builder http request")

	return nil
}

@@ -101,7 +101,7 @@ type BuilderClient interface {
	NodeURL() string
	GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
	RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
	Status(ctx context.Context) error
}

@@ -446,9 +446,6 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")

func getVersionsBlockToPayload(blockVersion int) (int, error) {
	if blockVersion >= version.Fulu {
		return version.Fulu, nil
	}
	if blockVersion >= version.Deneb {
		return version.Deneb, nil
	}

@@ -463,7 +460,7 @@ func getVersionsBlockToPayload(blockVersion int) (int, error) {

// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
	body, postOpts, err := c.buildBlindedBlockRequest(sb)
	if err != nil {
		return nil, nil, err

@@ -561,7 +558,7 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
func (c *Client) parseBlindedBlockResponse(
	respBytes []byte,
	forkVersion int,
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
	if c.sszEnabled {
		return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
	}

@@ -571,18 +568,8 @@ func (c *Client) parseBlindedBlockResponse(
func (c *Client) parseBlindedBlockResponseSSZ(
	respBytes []byte,
	forkVersion int,
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
	if forkVersion >= version.Fulu {
		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundleV2{}
		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundleV2 SSZ")
		}
		ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
		if err != nil {
			return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
		}
		return ed, payloadAndBlobs.BlobsBundle, nil
	} else if forkVersion >= version.Deneb {
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
	if forkVersion >= version.Deneb {
		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
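The hunks above switch `SubmitBlindedBlock` and the response parsers between returning the `v1.BlobsBundler` interface and the concrete `*v1.BlobsBundle`. A hedged sketch of how a caller might consume the interface-valued variant follows; it assumes, as the tests below do, that the concrete types behind the interface are `*v1.BlobsBundle` (Deneb) and `*v1.BlobsBundleV2` (Fulu). The helper name is hypothetical.

```go
package builder

import (
	"fmt"

	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
)

// kzgCommitments is a hypothetical caller-side helper that extracts the KZG
// commitments from either concrete bundle type behind v1.BlobsBundler.
func kzgCommitments(bundle v1.BlobsBundler) ([][]byte, error) {
	switch b := bundle.(type) {
	case *v1.BlobsBundle:
		return b.KzgCommitments, nil
	case *v1.BlobsBundleV2:
		return b.KzgCommitments, nil
	default:
		return nil, fmt.Errorf("unexpected blobs bundle type %T", bundle)
	}
}
```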
@@ -2,7 +2,6 @@ package builder

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"

@@ -14,7 +13,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"

@@ -1575,166 +1573,3 @@ func TestRequestLogger(t *testing.T) {
	err = c.Status(ctx)
	require.NoError(t, err)
}

func TestGetVersionsBlockToPayload(t *testing.T) {
	tests := []struct {
		name string
		blockVersion int
		expectedVersion int
		expectedError bool
	}{
		{
			name: "Fulu version",
			blockVersion: 6, // version.Fulu
			expectedVersion: 6,
			expectedError: false,
		},
		{
			name: "Deneb version",
			blockVersion: 4, // version.Deneb
			expectedVersion: 4,
			expectedError: false,
		},
		{
			name: "Capella version",
			blockVersion: 3, // version.Capella
			expectedVersion: 3,
			expectedError: false,
		},
		{
			name: "Bellatrix version",
			blockVersion: 2, // version.Bellatrix
			expectedVersion: 2,
			expectedError: false,
		},
		{
			name: "Unsupported version",
			blockVersion: 0,
			expectedError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			version, err := getVersionsBlockToPayload(tt.blockVersion)
			if tt.expectedError {
				assert.NotNil(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expectedVersion, version)
			}
		})
	}
}

func TestParseBlindedBlockResponseSSZ_WithBlobsBundleV2(t *testing.T) {
	c := &Client{sszEnabled: true}

	// Create test payload
	payload := &v1.ExecutionPayloadDeneb{
		ParentHash: make([]byte, 32),
		FeeRecipient: make([]byte, 20),
		StateRoot: make([]byte, 32),
		ReceiptsRoot: make([]byte, 32),
		LogsBloom: make([]byte, 256),
		PrevRandao: make([]byte, 32),
		BlockNumber: 123456,
		GasLimit: 30000000,
		GasUsed: 21000,
		Timestamp: 1234567890,
		ExtraData: []byte("test-extra-data"),
		BaseFeePerGas: make([]byte, 32),
		BlockHash: make([]byte, 32),
		Transactions: [][]byte{},
		Withdrawals: []*v1.Withdrawal{},
		BlobGasUsed: 1024,
		ExcessBlobGas: 2048,
	}

	// Create test BlobsBundleV2
	bundleV2 := &v1.BlobsBundleV2{
		KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
		Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
		Blobs: [][]byte{make([]byte, 131072), make([]byte, 131072)},
	}

	// Test Fulu version (should use ExecutionPayloadDenebAndBlobsBundleV2)
	t.Run("Fulu version with BlobsBundleV2", func(t *testing.T) {
		payloadAndBlobsV2 := &v1.ExecutionPayloadDenebAndBlobsBundleV2{
			Payload: payload,
			BlobsBundle: bundleV2,
		}

		respBytes, err := payloadAndBlobsV2.MarshalSSZ()
		require.NoError(t, err)

		ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 6) // version.Fulu
		require.NoError(t, err)
		require.NotNil(t, ed)
		require.NotNil(t, bundle)

		// Verify the bundle is BlobsBundleV2
		bundleV2Result, ok := bundle.(*v1.BlobsBundleV2)
		assert.Equal(t, true, ok, "Expected BlobsBundleV2 type")
		require.Equal(t, len(bundleV2.KzgCommitments), len(bundleV2Result.KzgCommitments))
		require.Equal(t, len(bundleV2.Proofs), len(bundleV2Result.Proofs))
		require.Equal(t, len(bundleV2.Blobs), len(bundleV2Result.Blobs))
	})

	// Test Deneb version (should use regular BlobsBundle)
	t.Run("Deneb version with regular BlobsBundle", func(t *testing.T) {
		regularBundle := &v1.BlobsBundle{
			KzgCommitments: bundleV2.KzgCommitments,
			Proofs: bundleV2.Proofs,
			Blobs: bundleV2.Blobs,
		}

		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{
			Payload: payload,
			BlobsBundle: regularBundle,
		}

		respBytes, err := payloadAndBlobs.MarshalSSZ()
		require.NoError(t, err)

		ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 4) // version.Deneb
		require.NoError(t, err)
		require.NotNil(t, ed)
		require.NotNil(t, bundle)

		// Verify the bundle is regular BlobsBundle
		regularBundleResult, ok := bundle.(*v1.BlobsBundle)
		assert.Equal(t, true, ok, "Expected BlobsBundle type")
		require.Equal(t, len(regularBundle.KzgCommitments), len(regularBundleResult.KzgCommitments))
	})

	// Test invalid SSZ data
	t.Run("Invalid SSZ data", func(t *testing.T) {
		invalidBytes := []byte("invalid-ssz-data")

		ed, bundle, err := c.parseBlindedBlockResponseSSZ(invalidBytes, 6)
		assert.NotNil(t, err)
		assert.Equal(t, true, ed == nil)
		assert.Equal(t, true, bundle == nil)
	})
}

func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
	// Note: The full integration test is complex due to version detection logic
	// The key functionality is tested in the parseBlindedBlockResponseSSZ tests above
	// and in the mock service tests which verify the interface changes work correctly

	t.Run("Interface signature verification", func(t *testing.T) {
		// This test verifies that the SubmitBlindedBlock method signature
		// has been updated to return BlobsBundler interface

		client := &Client{}

		// Verify the method exists with the correct signature
		// by using reflection or by checking it compiles with the interface
		var _ func(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) = client.SubmitBlindedBlock

		// This test passes if the signature is correct
		assert.Equal(t, true, true)
	})
}
@@ -41,7 +41,7 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}

// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
	return nil, nil, nil
}
@@ -127,7 +127,7 @@ func AcceptEncodingHeaderHandler() Middleware {
			gz := gzip.NewWriter(w)
			gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
			defer func() {
				if !gzipRW.zip {
				if !gzipRW.zipped {
					return
				}
				if err := gz.Close(); err != nil {

@@ -143,22 +143,13 @@ func AcceptEncodingHeaderHandler() Middleware {
type gzipResponseWriter struct {
	gz *gzip.Writer
	http.ResponseWriter
	zip bool
}

func (g *gzipResponseWriter) WriteHeader(statusCode int) {
	if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
		// Removing the current Content-Length because zipping will change it.
		g.Header().Del("Content-Length")
		g.Header().Set("Content-Encoding", "gzip")
		g.zip = true
	}

	g.ResponseWriter.WriteHeader(statusCode)
	zipped bool
}

func (g *gzipResponseWriter) Write(b []byte) (int, error) {
	if g.zip {
	if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
		g.zipped = true
		g.Header().Set("Content-Encoding", "gzip")
		return g.gz.Write(b)
	}
	return g.ResponseWriter.Write(b)
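The hunk above moves the decision to compress from `WriteHeader` to the first `Write`, once the Content-Type is known. A self-contained sketch of that decide-at-Write pattern, written against the standard library only (it deliberately does not reuse Prysm's middleware types or media-type constants):

```go
package main

import (
	"compress/gzip"
	"net/http"
	"strings"
)

// gzipWriter defers the compression decision to the first Write,
// once Content-Type has been set by the downstream handler.
type gzipWriter struct {
	http.ResponseWriter
	gz     *gzip.Writer
	zipped bool
}

func (g *gzipWriter) Write(b []byte) (int, error) {
	if strings.Contains(g.Header().Get("Content-Type"), "application/json") {
		g.zipped = true
		g.Header().Set("Content-Encoding", "gzip")
		return g.gz.Write(b)
	}
	return g.ResponseWriter.Write(b)
}

func withGzip(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			next.ServeHTTP(w, r)
			return
		}
		gz := gzip.NewWriter(w)
		gw := &gzipWriter{ResponseWriter: w, gz: gz}
		defer func() {
			// Only flush the gzip stream if we actually compressed anything.
			if gw.zipped {
				_ = gz.Close()
			}
		}()
		next.ServeHTTP(gw, r)
	})
}

func main() {
	http.Handle("/", withGzip(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true}`))
	})))
	_ = http.ListenAndServe(":8080", nil)
}
```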
@@ -13,23 +13,6 @@ import (
	log "github.com/sirupsen/logrus"
)

// frozenHeaderRecorder allows asserting that response headers were not modified
// after the call to WriteHeader.
//
// Its purpose is to have a regression test for https://github.com/OffchainLabs/prysm/pull/15499.
type frozenHeaderRecorder struct {
	*httptest.ResponseRecorder
	frozenHeader http.Header
}

func (r *frozenHeaderRecorder) WriteHeader(code int) {
	if r.frozenHeader != nil {
		return
	}
	r.ResponseRecorder.WriteHeader(code)
	r.frozenHeader = r.ResponseRecorder.Header().Clone()
}

func TestNormalizeQueryValuesHandler(t *testing.T) {
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))

@@ -149,7 +132,6 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
	dummyContent := "Test gzip middleware content"
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", r.Header.Get("Accept"))
		w.WriteHeader(http.StatusOK)
		_, err := w.Write([]byte(dummyContent))
		require.NoError(t, err)
	})

@@ -163,19 +145,19 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
		expectCompressed bool
	}{
		{
			name: "Accept gzip",
			name: "Gzip supported",
			accept: api.JsonMediaType,
			acceptEncoding: "gzip",
			expectCompressed: true,
		},
		{
			name: "Accept multiple encodings",
			name: "Multiple encodings supported",
			accept: api.JsonMediaType,
			acceptEncoding: "deflate, gzip",
			expectCompressed: true,
		},
		{
			name: "Accept unsupported encoding",
			name: "Gzip not supported",
			accept: api.JsonMediaType,
			acceptEncoding: "deflate",
			expectCompressed: false,

@@ -201,12 +183,12 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
			if tt.acceptEncoding != "" {
				req.Header.Set("Accept-Encoding", tt.acceptEncoding)
			}
			rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}
			rr := httptest.NewRecorder()

			handler.ServeHTTP(rr, req)

			if tt.expectCompressed {
				require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
				require.Equal(t, "gzip", rr.Header().Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")

				compressedBody := rr.Body.Bytes()
				require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
@@ -74,7 +74,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
	}

	return &BeaconState{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -177,7 +177,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
	}

	return &BeaconStateAltair{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -295,7 +295,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
	}

	return &BeaconStateBellatrix{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -430,7 +430,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
	}

	return &BeaconStateCapella{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -568,7 +568,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
	}

	return &BeaconStateDeneb{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -742,7 +742,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
	}

	return &BeaconStateElectra{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

@@ -932,7 +932,7 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
		lookahead[i] = fmt.Sprintf("%d", uint64(v))
	}
	return &BeaconStateFulu{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
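Each of the hunks above replaces `st.GenesisTime().Unix()` with `st.GenesisTime()` in the formatted field, which is what one would expect if `GenesisTime()` returns Unix seconds as an integer on one side of the comparison and a `time.Time` on the other. A tiny illustration of the two formattings (the timestamp value is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// If GenesisTime() returns a time.Time, the Unix seconds must be extracted first.
	asTime := time.Unix(1606824023, 0).UTC()
	fmt.Println(fmt.Sprintf("%d", asTime.Unix())) // "1606824023"

	// If GenesisTime() already returns Unix seconds, it can be formatted directly.
	var asSeconds uint64 = 1606824023
	fmt.Println(fmt.Sprintf("%d", asSeconds)) // "1606824023"
}
```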
@@ -31,7 +31,6 @@ type GetForkChoiceDumpResponse struct {
type ForkChoiceDumpExtraData struct {
	UnrealizedJustifiedCheckpoint *Checkpoint `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *Checkpoint `json:"unrealized_finalized_checkpoint"`
	SafeHeadRoot string `json:"safe_head_root"`
	ProposerBoostRoot string `json:"proposer_boost_root"`
	PreviousProposerBoostRoot string `json:"previous_proposer_boost_root"`
	HeadRoot string `json:"head_root"`
@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
	for {
		select {
		case <-ticker.C:
			log.WithField("function", funcName).Trace("Running")
			log.WithField("function", funcName).Trace("running")
			f()
		case <-ctx.Done():
			log.WithField("function", funcName).Debug("Context is closed, exiting")
			log.WithField("function", funcName).Debug("context is closed, exiting")
			ticker.Stop()
			return
		}
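The hunk only changes log capitalization, but the surrounding loop is a common pattern worth stating on its own: run a callback on every tick and exit cleanly on context cancellation. A standalone restatement, without Prysm imports:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runEvery mirrors the loop shown above: invoke f on every tick and stop the
// ticker when the context is cancelled.
func runEvery(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	go func() {
		for {
			select {
			case <-ticker.C:
				f()
			case <-ctx.Done():
				ticker.Stop()
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runEvery(ctx, time.Second, func() { fmt.Println("tick") })
	<-ctx.Done()
}
```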
@@ -41,8 +41,7 @@ type ForkchoiceFetcher interface {
	Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
	CachedHeadRoot() [32]byte
	GetProposerHead() [32]byte
	SetForkChoiceGenesisTime(time.Time)
	SafeBlockHash() [32]byte
	SetForkChoiceGenesisTime(uint64)
	UpdateHead(context.Context, primitives.Slot)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)

@@ -515,7 +514,7 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo

// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
	s.genesisTime = t.Truncate(time.Second) // Genesis time has a precision of 1 second.
	s.genesisTime = t
}

func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
@@ -2,7 +2,6 @@ package blockchain

import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	consensus_blocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"

@@ -28,7 +27,7 @@ func (s *Service) GetProposerHead() [32]byte {
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)

@@ -91,12 +90,6 @@ func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
	return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}

func (s *Service) SafeBlockHash() [32]byte {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.SafeBlockHash()
}

// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
	s.cfg.ForkChoiceStore.RLock()
||||
import (
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -11,7 +14,10 @@ import (
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
@@ -25,8 +31,11 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
s.head = &head{root: [32]byte{'A'}}
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
@@ -42,10 +51,13 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
s := testServiceWithDB(t)
|
||||
s.head = &head{block: wsb}
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
head: &head{block: wsb},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
@@ -61,8 +73,10 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := s.cfg.BeaconDB
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
	"time"

	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"

@@ -137,7 +138,7 @@ func TestFinalizedBlockHash(t *testing.T) {

func TestUnrealizedJustifiedBlockHash(t *testing.T) {
	ctx := t.Context()
	service := testServiceWithDB(t)
	service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
	ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -152,7 +153,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
	c := testServiceNoDB(t)
	c := &Service{}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())

@@ -199,7 +200,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
	require.NoError(t, err)
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{block: wsb, state: s}

	received, err := c.HeadBlock(t.Context())

@@ -212,7 +213,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
func TestHeadState_CanRetrieve(t *testing.T) {
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
	require.NoError(t, err)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{state: s}
	headState, err := c.HeadState(t.Context())
	require.NoError(t, err)

@@ -220,8 +221,7 @@ func TestHeadState_CanRetrieve(t *testing.T) {
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
	c := testServiceNoDB(t)
	c.genesisTime = time.Unix(999, 0)
	c := &Service{genesisTime: time.Unix(999, 0)}
	wanted := time.Unix(999, 0)
	assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}

@@ -230,7 +230,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
	f := &ethpb.Fork{Epoch: 999}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
	require.NoError(t, err)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{state: s}
	if !proto.Equal(c.CurrentFork(), f) {
		t.Error("Received incorrect fork version")

@@ -242,7 +242,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
		PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		CurrentVersion: params.BeaconConfig().GenesisForkVersion,
	}
	c := testServiceNoDB(t)
	c := &Service{}
	if !proto.Equal(c.CurrentFork(), f) {
		t.Error("Received incorrect fork version")
	}

@@ -250,7 +250,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {

func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
	// Should not panic if head state is nil.
	c := testServiceNoDB(t)
	c := &Service{}
	assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")

	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})

@@ -269,7 +269,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
	d := &ethpb.Eth1Data{DepositCount: 999}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
	require.NoError(t, err)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{state: s}
	if !proto.Equal(c.HeadETH1Data(), d) {
		t.Error("Received incorrect eth1 data")

@@ -298,7 +298,7 @@ func TestIsCanonical_Ok(t *testing.T) {

func TestService_HeadValidatorsIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := testServiceNoDB(t)
	c := &Service{}

	c.head = &head{}
	indices, err := c.HeadValidatorsIndices(t.Context(), 0)

@@ -313,7 +313,7 @@ func TestService_HeadValidatorsIndices(t *testing.T) {

func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 1)
	c := testServiceNoDB(t)
	c := &Service{}

	c.head = &head{}
	root := c.HeadGenesisValidatorsRoot()

@@ -332,7 +332,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {

func TestService_ChainHeads(t *testing.T) {
	ctx := t.Context()
	c := testServiceWithDB(t)
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -366,7 +366,7 @@ func TestService_ChainHeads(t *testing.T) {

func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{state: s}

	_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})

@@ -381,7 +381,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
}

func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = nil

	idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})

@@ -396,7 +396,7 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {

func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = &head{state: s}

	p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)

@@ -409,7 +409,7 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
}

func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
	c := testServiceNoDB(t)
	c := &Service{}
	c.head = nil

	p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)

@@ -431,9 +431,7 @@ func TestService_IsOptimistic(t *testing.T) {
	ctx := t.Context()
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	c := testServiceWithDB(t)
	c.SetGenesisTime(time.Now())
	c.head = &head{root: [32]byte{'b'}}
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -452,15 +450,14 @@ func TestService_IsOptimistic(t *testing.T) {
	require.Equal(t, true, opt)

	// If head is nil, for some reason, an error should be returned rather than panic.
	c = testServiceNoDB(t)
	c = &Service{}
	_, err = c.IsOptimistic(ctx)
	require.ErrorIs(t, err, ErrNilHead)
}

func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
	ctx := t.Context()
	c := testServiceNoDB(t)
	c.genesisTime = time.Now()
	c := &Service{genesisTime: time.Now()}
	opt, err := c.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, false, opt)

@@ -468,8 +465,7 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {

func TestService_IsOptimisticForRoot(t *testing.T) {
	ctx := t.Context()
	c := testServiceWithDB(t)
	c.head = &head{root: [32]byte{'b'}}
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -485,10 +481,9 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
}

func TestService_IsOptimisticForRoot_DB(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := testServiceWithDB(t)
	c.head = &head{root: [32]byte{'b'}}
	beaconDB := c.cfg.BeaconDB
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10

@@ -539,9 +534,9 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
}

func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10

@@ -578,9 +573,9 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}

func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10

@@ -598,9 +593,9 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
}

func TestService_IsFinalized(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
	r1 := [32]byte{'a'}
	require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
		Root: r1,
@@ -2,6 +2,7 @@ package blockchain

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
	"github.com/OffchainLabs/prysm/v6/time"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

@@ -16,6 +17,9 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
// a higher number of fragmented indexes are reallocated to a new separate slice for
// that field.
func (s *Service) defragmentState(st state.BeaconState) {
	if !features.Get().EnableExperimentalState {
		return
	}
	startTime := time.Now()
	st.Defragment()
	elapsedTime := time.Since(startTime)
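The hunk gates defragmentation behind `features.Get().EnableExperimentalState` before timing `st.Defragment()` into a Prometheus summary. A standalone sketch of that gate-then-time-then-observe shape; the metric name and the boolean flag are stand-ins, not Prysm identifiers.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// defragSeconds is a stand-in for the stateDefragmentationTime summary.
var defragSeconds = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_defragmentation_seconds",
})

// experimentalStateEnabled stands in for features.Get().EnableExperimentalState.
var experimentalStateEnabled = false

func maybeDefragment(defragment func()) {
	if !experimentalStateEnabled {
		return // feature gate: do nothing unless explicitly enabled
	}
	start := time.Now()
	defragment()
	defragSeconds.Observe(time.Since(start).Seconds())
}

func main() {
	maybeDefragment(func() { time.Sleep(10 * time.Millisecond) })
}
```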
@@ -63,10 +63,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
		return nil, nil
	}
	finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
	safeBlockHash := s.cfg.ForkChoiceStore.SafeBlockHash()
	justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
	fcs := &enginev1.ForkchoiceState{
		HeadBlockHash: headPayload.BlockHash(),
		SafeBlockHash: safeBlockHash[:],
		SafeBlockHash: justifiedHash[:],
		FinalizedBlockHash: finalizedHash[:],
	}
	if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {

@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	}

	if err := s.saveHead(ctx, r, b, st); err != nil {
		log.WithError(err).Error("Could not save head after pruning invalid blocks")
		log.WithError(err).Error("could not save head after pruning invalid blocks")
	}

	log.WithFields(logrus.Fields{

@@ -354,7 +354,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
	}

	// Get timestamp.
	t, err := slots.StartTime(s.genesisTime, slot)
	t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
	if err != nil {
		log.WithError(err).Error("Could not get timestamp to get payload attribute")
		return emptyAttri
@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
		log.WithError(err).Error("Could not compute payload attributes")
		log.WithError(err).Error("could not compute payload attributes")
		return
	}
	if fcuArgs.attributes.IsEmpty() {
		return
	}
	if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
		log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
		log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
	}
}

@@ -99,7 +99,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
	}

	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
		log.WithError(err).Error("Could not save head")
		log.WithError(err).Error("could not save head")
	}

	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)

@@ -114,7 +114,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
	headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
	if err != nil {
		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
	}
	currentSlot := s.CurrentSlot()
	if proposingSlot == currentSlot {

@@ -132,17 +132,17 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
	if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
		return true
	}
	sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
	secs, err := slots.SecondsSinceSlotStart(currentSlot,
		uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
	if err != nil {
		log.WithError(err).Error("Could not compute seconds since slot start")
		log.WithError(err).Error("could not compute seconds since slot start")
	}
	if sss >= doublylinkedtree.ProcessAttestationsThreshold {
	if secs >= doublylinkedtree.ProcessAttestationsThreshold {
		log.WithFields(logrus.Fields{
			"root": fmt.Sprintf("%#x", newHeadRoot),
			"weight": headWeight,
			"sinceSlotStart": sss,
			"threshold": doublylinkedtree.ProcessAttestationsThreshold,
		}).Info("Attempted late block reorg aborted due to attestations after threshold")
			"root": fmt.Sprintf("%#x", newHeadRoot),
			"weight": headWeight,
		}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
			doublylinkedtree.ProcessAttestationsThreshold)
		lateBlockFailedAttemptFirstThreshold.Inc()
	}
}

@@ -160,7 +160,6 @@ func TestShouldOverrideFCU(t *testing.T) {
	ctx, fcs := tr.ctx, tr.fcs

	service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
	fcs.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
	headRoot := [32]byte{'b'}
	parentRoot := [32]byte{'a'}
	ojc := &ethpb.Checkpoint{}

@@ -181,12 +180,11 @@ func TestShouldOverrideFCU(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, headRoot, head)

	wantLog := "aborted due to attestations after threshold"
	fcs.SetGenesisTime(time.Now().Add(-29 * time.Second))
	fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
	require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
	require.LogsDoNotContain(t, hook, wantLog)
	fcs.SetGenesisTime(time.Now().Add(-24 * time.Second))
	require.LogsDoNotContain(t, hook, "10 seconds")
	fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
	service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
	require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
	require.LogsContain(t, hook, wantLog)
	require.LogsContain(t, hook, "10 seconds")
}
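The hunk swaps a `time.Time`-based helper for one that takes Unix seconds, but the quantity being compared against the attestation threshold is the same: the number of seconds elapsed since the start of the current slot, i.e. now minus (genesis + slot × seconds-per-slot). A small self-contained illustration of that arithmetic; the 12-second slot duration is an illustrative constant, not read from Prysm's config.

```go
package main

import "fmt"

// secondsSinceSlotStart re-derives the value the hunk computes: elapsed seconds
// between the start of `slot` and `now`, with all times as Unix seconds.
func secondsSinceSlotStart(slot, genesis, now uint64) (uint64, error) {
	const secondsPerSlot = 12 // illustrative slot duration
	slotStart := genesis + slot*secondsPerSlot
	if now < slotStart {
		return 0, fmt.Errorf("slot %d starts in the future", slot)
	}
	return now - slotStart, nil
}

func main() {
	secs, err := secondsSinceSlotStart(3, 1_000, 1_000+3*12+5)
	fmt.Println(secs, err) // 5 <nil>
}
```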
@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
	oldHeadRoot := bytesutil.ToBytes32(r)
	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
	if err != nil {
		log.WithError(err).Error("Could not check if node is optimistically synced")
		log.WithError(err).Error("could not check if node is optimistically synced")
	}
	if headBlock.Block().ParentRoot() != oldHeadRoot {
		// A chain re-org occurred, so we fire an event notifying the rest of the services.

@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
		dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
		oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
		}
		newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
		}
		log.WithFields(logrus.Fields{
			"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Initialize the state cache for sync committees.
|
||||
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
||||
|
||||
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
||||
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
|
||||
type HeadSyncCommitteeFetcher interface {
|
||||
@@ -140,7 +143,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
|
||||
defer mLock.Unlock()
|
||||
|
||||
// If there's already a head state exists with the request slot, we don't need to process slots.
|
||||
cachedState, err := s.syncCommitteeHeadState.Get(slot)
|
||||
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
|
||||
switch {
|
||||
case err == nil:
|
||||
syncHeadStateHit.Inc()
|
||||
@@ -163,7 +166,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
|
||||
return nil, err
|
||||
}
|
||||
syncHeadStateMiss.Inc()
|
||||
err = s.syncCommitteeHeadState.Put(slot, headState)
|
||||
err = syncCommitteeHeadStateCache.Put(slot, headState)
|
||||
return headState, err
|
||||
default:
|
||||
// In the event, we encounter another error
|
||||
|
||||
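This hunk moves the sync-committee head-state cache from a `Service` field to a package-level variable with slot-keyed `Get`/`Put` access. A generic, standalone sketch of that shape, assuming nothing about the real `cache.SyncCommitteeHeadState` type beyond the miss-returns-an-error behavior the tests below rely on:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("not found in cache")

// slotCache is a minimal stand-in for the package-level head-state cache:
// keyed by slot, guarded by a mutex, and returning a sentinel error on a miss.
type slotCache[T any] struct {
	mu      sync.RWMutex
	entries map[uint64]T
}

func newSlotCache[T any]() *slotCache[T] {
	return &slotCache[T]{entries: make(map[uint64]T)}
}

func (c *slotCache[T]) Get(slot uint64) (T, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.entries[slot]
	if !ok {
		var zero T
		return zero, errNotFound
	}
	return v, nil
}

func (c *slotCache[T]) Put(slot uint64, v T) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[slot] = v
	return nil
}

// A package-level instance, analogous to syncCommitteeHeadStateCache.
var headStateCache = newSlotCache[string]()

func main() {
	if _, err := headStateCache.Get(101); err != nil {
		fmt.Println("miss:", err)
	}
	_ = headStateCache.Put(101, "state@101")
	v, _ := headStateCache.Get(101)
	fmt.Println("hit:", v)
}
```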
@@ -5,6 +5,7 @@ import (

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/testing/require"

@@ -14,7 +15,7 @@ import (

func TestService_HeadSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c.head = &head{state: s}

	// Current period

@@ -37,7 +38,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {

func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c.head = &head{state: s}

	// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.

@@ -51,7 +52,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {

func TestService_headNextSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{}
	c.head = &head{state: s}

	// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.

@@ -65,7 +66,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {

func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c.head = &head{state: s}

	// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.

@@ -80,7 +81,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {

func TestService_HeadSyncCommitteeDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())

@@ -94,7 +95,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {

func TestService_HeadSyncContributionProofDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{}
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())

@@ -108,7 +109,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {

func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := testServiceWithDB(t)
	c := &Service{}
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())

@@ -121,14 +122,17 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
}

func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
	s := testServiceNoDB(t)
	c := syncCommitteeHeadStateCache
	t.Cleanup(func() {
		syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
	})
	beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
	require.NoError(t, beaconState.SetSlot(100))
	cachedState, err := s.syncCommitteeHeadState.Get(101)
	cachedState, err := c.Get(101)
	require.ErrorContains(t, cache.ErrNotFound.Error(), err)
	require.Equal(t, nil, cachedState)
	require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
	cachedState, err = s.syncCommitteeHeadState.Get(101)
	require.NoError(t, c.Put(101, beaconState))
	cachedState, err = c.Get(101)
	require.NoError(t, err)
	require.DeepEqual(t, beaconState, cachedState)
}
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -153,10 +154,14 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
t.Run("genesis_state_root", func(t *testing.T) {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
srv := testServiceWithDB(t)
|
||||
srv.SetGenesisTime(time.Now())
|
||||
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
|
||||
srv.originBlockRoot = [32]byte{1}
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
srv := &Service{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
ForkChoiceStore: doublylinkedtree.New(),
|
||||
},
|
||||
originBlockRoot: [32]byte{1},
|
||||
}
|
||||
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
@@ -180,11 +185,15 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
})
|
||||
t.Run("non_genesis_values", func(t *testing.T) {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
genesisRoot := [32]byte{1}
|
||||
srv := testServiceWithDB(t)
|
||||
srv.SetGenesisTime(time.Now())
|
||||
srv.originBlockRoot = genesisRoot
|
||||
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
|
||||
srv := &Service{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
ForkChoiceStore: doublylinkedtree.New(),
|
||||
},
|
||||
originBlockRoot: genesisRoot,
|
||||
}
|
||||
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
@@ -398,7 +407,7 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
|
||||
@@ -86,8 +86,8 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.ToTime(genesisTime, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -8,6 +8,16 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err = reportEpochMetrics(t.Context(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -8,10 +8,9 @@ import (
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func testServiceOptsWithDB(t testing.TB) []Option {
|
||||
func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New()
|
||||
cs := startup.NewClockSynchronizer()
|
||||
@@ -32,15 +31,3 @@ func testServiceOptsNoDB() []Option {
|
||||
cs := startup.NewClockSynchronizer()
|
||||
return []Option{WithClockSynchronizer(cs)}
|
||||
}
|
||||
|
||||
func testServiceNoDB(t testing.TB) *Service {
|
||||
s, err := NewService(t.Context(), testServiceOptsNoDB()...)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
func testServiceWithDB(t testing.TB) *Service {
|
||||
s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -33,14 +33,6 @@ func WithMaxGoroutines(x int) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithLCStore for light client store access.
|
||||
func WithLCStore() Option {
|
||||
return func(s *Service) error {
|
||||
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithWeakSubjectivityCheckpoint for checkpoint sync.
|
||||
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -254,7 +246,7 @@ func WithSlasherEnabled(enabled bool) Option {
|
||||
// WithGenesisTime sets the genesis time for the blockchain service.
|
||||
func WithGenesisTime(genesisTime time.Time) Option {
|
||||
return func(s *Service) error {
|
||||
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
s.genesisTime = genesisTime
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
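The retained option truncates the configured genesis time to whole seconds ("Genesis time has a precision of 1 second"), which keeps time.Time-based slot math consistent with the call sites that convert genesis via uint64(genesisTime.Unix()). A small illustration of why a fractional genesis can make the two views disagree near a slot boundary; the constant and helper names are made up for this example.

package main

import (
	"fmt"
	"time"
)

// slotByDuration derives a slot from the exact time.Time difference.
func slotByDuration(genesis, now time.Time, secondsPerSlot int64) int64 {
	return int64(now.Sub(genesis)/time.Second) / secondsPerSlot
}

// slotByUnix derives a slot from whole Unix seconds, as code using uint64(genesis.Unix()) does.
func slotByUnix(genesis, now time.Time, secondsPerSlot int64) int64 {
	return (now.Unix() - genesis.Unix()) / secondsPerSlot
}

func main() {
	const secondsPerSlot = 12 // mainnet value, used only for illustration

	genesis := time.Unix(1000, 900_000_000) // genesis with a 0.9s fractional part
	now := time.Unix(1012, 500_000_000)     // 11.6s after genesis: still slot 0

	fmt.Println(slotByDuration(genesis, now, secondsPerSlot), slotByUnix(genesis, now, secondsPerSlot)) // 0 1: the two views disagree

	trunc := genesis.Truncate(time.Second) // what WithGenesisTime stores
	fmt.Println(slotByDuration(trunc, now, secondsPerSlot), slotByUnix(trunc, now, secondsPerSlot)) // 1 1: consistent again
}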
@@ -60,8 +60,10 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
return err
|
||||
}
|
||||
|
||||
genesisTime := uint64(s.genesisTime.Unix())
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := verifyAttTargetEpoch(ctx, s.genesisTime, time.Now().Add(disparity), tgt); err != nil {
|
||||
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -74,7 +76,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := slots.VerifyTime(s.genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
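The second check above enforces that an attestation for slot N can only influence fork choice from slot N+1 onward: the start time of slot N+1, allowing for the configured clock disparity, must not still be in the future. A hedged sketch of that timing rule in plain Go; the constant and function names are illustrative and not the slots package API.

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // illustrative mainnet value

// verifyAttSlotTime reports whether slot+1 has begun, within a clock-disparity allowance,
// mirroring the slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity) call.
func verifyAttSlotTime(genesisTime, attSlot uint64, disparity time.Duration) error {
	nextSlotStart := time.Unix(int64(genesisTime+(attSlot+1)*secondsPerSlot), 0)
	now := time.Now().Add(disparity) // tolerate peers whose clocks run slightly ahead
	if now.Before(nextSlotStart) {
		return fmt.Errorf("attestation for slot %d is too early: slot %d starts at %v", attSlot, attSlot+1, nextSlotStart)
	}
	return nil
}

func main() {
	genesis := uint64(time.Now().Unix()) - 10*secondsPerSlot // pretend genesis was 10 slots ago
	fmt.Println(verifyAttSlotTime(genesis, 8, 500*time.Millisecond))  // nil: slot 9 already started
	fmt.Println(verifyAttSlotTime(genesis, 10, 500*time.Millisecond)) // error: slot 11 has not started yet
}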
@@ -4,12 +4,12 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -139,8 +139,8 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
|
||||
}
|
||||
|
||||
// verifyAttTargetEpoch validates that the attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesis, now time.Time, c *ethpb.Checkpoint) error {
currentSlot := slots.At(genesis, now)
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := slots.ToEpoch(currentSlot)
var prevEpoch primitives.Epoch
// Prevents previous epoch underflow

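With the new signature the function works directly in Unix seconds: the current slot is (now - genesis) / SecondsPerSlot, that slot maps to an epoch, and the attestation target must name either that epoch or the one before it, with the previous epoch clamped at zero to avoid underflow. A self-contained sketch of the same arithmetic; the constants and helper below use mainnet-style values (32 slots per epoch, 12 seconds per slot) for illustration and are not Prysm's API.

package main

import "fmt"

const (
	secondsPerSlot = 12
	slotsPerEpoch  = 32
)

// targetEpochValid mirrors verifyAttTargetEpoch: the target epoch must be the
// current or the previous epoch, where the previous epoch is clamped at 0.
func targetEpochValid(genesisTime, nowTime, targetEpoch uint64) error {
	currentSlot := (nowTime - genesisTime) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch

	var prevEpoch uint64
	if currentEpoch > 0 { // prevents previous-epoch underflow
		prevEpoch = currentEpoch - 1
	}

	if targetEpoch != currentEpoch && targetEpoch != prevEpoch {
		return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d",
			targetEpoch, currentEpoch, prevEpoch)
	}
	return nil
}

func main() {
	genesis := uint64(0)
	now := uint64(2 * slotsPerEpoch * secondsPerSlot) // start of epoch 2

	fmt.Println(targetEpochValid(genesis, now, 1)) // <nil>: previous epoch is accepted
	fmt.Println(targetEpochValid(genesis, now, 0)) // error, matching the TestAttEpoch_NotMatch expectation
}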
@@ -355,22 +355,22 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := time.Unix(2*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
err := verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err := verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -329,7 +329,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
// The latest block header is from the previous epoch
|
||||
r, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
return nil
|
||||
}
|
||||
// The proposer indices cache takes the target root for the previous
|
||||
@@ -339,12 +339,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
}
|
||||
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
return nil
|
||||
}
|
||||
err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -562,7 +562,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
|
||||
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
|
||||
func (s *Service) runLateBlockTasks() {
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Failed to wait for initial sync")
|
||||
log.WithError(err).Error("failed to wait for initial sync")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -737,10 +737,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine slot start time: %w", err)
|
||||
}
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
@@ -858,10 +855,7 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
|
||||
nc := s.blobNotifiers.forRoot(root, block.Slot())
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine slot start time: %w", err)
|
||||
}
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
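Both availability checks follow the same pattern: compute when the next slot begins, and only if that moment is still in the future, arm a timer that logs if the DA wait is still unresolved when the slot boundary passes (presumably the timer is stopped once the data arrives). A stdlib-only sketch of the pattern; the done channel and the log call are placeholders for the real blob-notifier plumbing.

package main

import (
	"fmt"
	"time"
)

// waitWithSlotBoundaryLog waits for done, and if the wait crosses nextSlot it
// emits a debug-style log, mirroring the time.AfterFunc usage in the diff.
func waitWithSlotBoundaryLog(done <-chan struct{}, nextSlot time.Time) {
	if nextSlot.After(time.Now()) {
		t := time.AfterFunc(time.Until(nextSlot), func() {
			fmt.Println("still waiting for data availability past the next slot start")
		})
		defer t.Stop() // usually no log if the data shows up before the boundary
	}
	<-done
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()
	// Pretend the next slot starts 200ms from now; data arrives first, so nothing is logged.
	waitWithSlotBoundaryLog(done, time.Now().Add(200*time.Millisecond))
}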
@@ -912,7 +906,7 @@ func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
|
||||
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
|
||||
func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
currentSlot := s.CurrentSlot()
|
||||
if currentSlot == s.HeadSlot() {
|
||||
if s.CurrentSlot() == s.HeadSlot() {
|
||||
return
|
||||
}
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
@@ -933,10 +927,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
|
||||
}
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
@@ -950,7 +944,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if attribute.IsEmpty() {
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
|
||||
log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
|
||||
}
|
||||
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
|
||||
// call notifyForkchoiceUpdate, so the event is fired here.
|
||||
|
||||
@@ -33,7 +33,7 @@ import (
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
func (s *Service) CurrentSlot() primitives.Slot {
|
||||
return slots.CurrentSlot(s.genesisTime)
|
||||
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
}
|
||||
|
||||
// getFCUArgs returns the arguments to call forkchoice update
|
||||
@@ -45,7 +45,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
|
||||
return nil
|
||||
}
|
||||
slot := cfg.roblock.Block().Slot()
|
||||
if slots.WithinVotingWindow(s.genesisTime, slot) {
|
||||
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
|
||||
return nil
|
||||
}
|
||||
return s.computePayloadAttributes(cfg, fcuArgs)
|
||||
@@ -68,11 +68,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
|
||||
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
|
||||
}
|
||||
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
|
||||
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
@@ -134,6 +134,9 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
|
||||
if err := s.processLightClientUpdate(cfg); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client update")
|
||||
}
|
||||
if err := s.processLightClientBootstrap(cfg); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client bootstrap")
|
||||
}
|
||||
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client optimistic update")
|
||||
}
|
||||
@@ -186,7 +189,47 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
|
||||
|
||||
return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
|
||||
oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get current light client update")
|
||||
}
|
||||
|
||||
if oldUpdate == nil {
|
||||
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client update")
|
||||
}
|
||||
log.WithField("period", period).Debug("Saved new light client update")
|
||||
return nil
|
||||
}
|
||||
|
||||
isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not compare light client updates")
|
||||
}
|
||||
|
||||
if isNewUpdateBetter {
|
||||
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client update")
|
||||
}
|
||||
log.WithField("period", period).Debug("Saved new light client update")
|
||||
return nil
|
||||
}
|
||||
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
|
||||
return nil
|
||||
}
|
||||
|
||||
// processLightClientBootstrap saves a light client bootstrap for this block
|
||||
// when the feature flag is enabled.
|
||||
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
|
||||
blockRoot := cfg.roblock.Root()
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not create light client bootstrap")
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client bootstrap")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) processLightClientFinalityUpdate(
|
||||
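The expanded processLightClientUpdate implements a simple "keep the best update per sync-committee period" rule: if nothing is stored for the period, save the new update; otherwise save it only when the comparison says it is better. A generic stdlib sketch of that rule; the store map and isBetter comparator are hypothetical stand-ins for the BeaconDB calls and lightclient.IsBetterUpdate.

package main

import "fmt"

// update is a stand-in for a light client update; higher participation ~ better.
type update struct{ participation int }

// saveIfBetter keeps at most one update per period, replacing it only when the
// candidate compares better, mirroring the control flow in processLightClientUpdate.
func saveIfBetter(store map[uint64]update, period uint64, candidate update, isBetter func(newU, oldU update) bool) {
	old, ok := store[period]
	if !ok {
		store[period] = candidate // no stored update for this period yet
		return
	}
	if isBetter(candidate, old) {
		store[period] = candidate
		return
	}
	// otherwise keep the existing update and skip the save
}

func main() {
	store := map[uint64]update{}
	better := func(n, o update) bool { return n.participation > o.participation }

	saveIfBetter(store, 7, update{participation: 3}, better) // first update for period 7
	saveIfBetter(store, 7, update{participation: 5}, better) // better, replaces the stored one
	saveIfBetter(store, 7, update{participation: 4}, better) // worse, ignored
	fmt.Println(store[7].participation)                      // 5
}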
@@ -389,7 +432,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the future.
|
||||
if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -484,7 +527,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
// is meant to be asynchronous and run in the background rather than being
|
||||
// tied to the execution of a block.
|
||||
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
|
||||
log.WithError(err).Error("Could not migrate to cold")
|
||||
log.WithError(err).Error("could not migrate to cold")
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
@@ -573,7 +616,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
// Update deposit cache.
|
||||
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not fetch finalized state")
|
||||
log.WithError(err).Error("could not fetch finalized state")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -591,7 +634,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not cast eth1 deposit index")
|
||||
log.WithError(err).Error("could not cast eth1 deposit index")
|
||||
return
|
||||
}
|
||||
// The deposit index in the state is always the index of the next deposit
|
||||
@@ -600,12 +643,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
finalizedEth1DepIdx := eth1DepositIndex - 1
|
||||
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
|
||||
0 /* Setting a zero value as we have no access to block height */); err != nil {
|
||||
log.WithError(err).Error("Could not insert finalized deposits")
|
||||
log.WithError(err).Error("could not insert finalized deposits")
|
||||
return
|
||||
}
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
|
||||
log.WithError(err).Error("Could not prune deposit proofs")
|
||||
log.WithError(err).Error("could not prune deposit proofs")
|
||||
}
|
||||
// Prune deposits which have already been finalized; the method below prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.
|
||||
|
||||
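The comments in this hunk encode an off-by-one worth spelling out: the state's eth1 deposit index points at the next deposit to be processed, so the last finalized deposit is index-1, its proof is only needed during state transition and can be dropped, and pending deposits below the index are pruned non-inclusively. A small sketch of that bookkeeping over a plain slice; the types and function here are hypothetical stand-ins, not the Prysm DepositCache API.

package main

import "fmt"

type deposit struct {
	index int
	proof []byte
}

// pruneFinalized removes deposits already reflected in the state (index < eth1DepositIndex);
// their proofs are only used during state transition, so nothing is lost by dropping them.
func pruneFinalized(pending []deposit, eth1DepositIndex int) []deposit {
	if eth1DepositIndex == 0 {
		return pending // nothing has been processed yet
	}
	lastFinalized := eth1DepositIndex - 1 // the state index points at the *next* deposit

	kept := pending[:0]
	for _, d := range pending {
		if d.index <= lastFinalized {
			continue // finalized: safe to drop from the pending set
		}
		kept = append(kept, d)
	}
	return kept
}

func main() {
	pending := []deposit{{index: 3}, {index: 4}, {index: 5}}
	// eth1DepositIndex == 5 means deposits 0..4 are already reflected in the state.
	fmt.Println(pruneFinalized(pending, 5)) // only index 5 remains pending
}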
@@ -59,8 +59,11 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
cfg.TargetCommitteeSize = 8
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
s := testServiceNoDB(t)
|
||||
s.cfg.AttPool = kv.NewAttCaches()
|
||||
s := Service{
|
||||
cfg: &config{
|
||||
AttPool: kv.NewAttCaches(),
|
||||
},
|
||||
}
|
||||
|
||||
data := ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
@@ -468,8 +471,7 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
|
||||
}
|
||||
|
||||
func TestCurrentSlot_HandlesOverflow(t *testing.T) {
|
||||
svc := testServiceNoDB(t)
|
||||
svc.genesisTime = prysmTime.Now().Add(1 * time.Hour)
|
||||
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
|
||||
|
||||
slot := svc.CurrentSlot()
|
||||
require.Equal(t, primitives.Slot(0), slot, "Unexpected slot")
|
||||
@@ -1357,7 +1359,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1378,7 +1380,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1399,7 +1401,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1548,7 +1550,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1568,7 +1570,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1589,7 +1591,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1740,7 +1742,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1761,7 +1763,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1812,7 +1814,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
// import blocks 13 through 18 to justify 12
|
||||
invalidRoots := make([][32]byte, 19-13)
|
||||
for i := 13; i < 19; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -1907,7 +1909,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
// Import blocks 21--30 (Epoch 3 was not enough to justify 2)
|
||||
for i := 21; i < 30; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
@@ -1993,8 +1995,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -2014,7 +2015,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -2063,7 +2064,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
|
||||
// import blocks 13 through 18 to justify 12
|
||||
for i := 13; i < 19; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -2306,7 +2307,6 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
service, tr := minimalTestService(t)
|
||||
service.SetGenesisTime(time.Now())
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
}
|
||||
@@ -2319,20 +2319,17 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
|
||||
defer resetCfg()
|
||||
|
||||
service, tr := minimalTestService(t)
|
||||
service.SetGenesisTime(time.Now())
|
||||
service.SetForkChoiceGenesisTime(time.Now())
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
}
|
||||
|
||||
// Helper function to simulate the block being on time or delayed for proposer
|
||||
// boost. It alters the genesisTime tracked by the store.
|
||||
func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
|
||||
now := time.Now()
|
||||
slotDuration := time.Duration(slot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
genesis := now.Add(-slotDuration - delay)
|
||||
s.SetGenesisTime(genesis)
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(genesis)
|
||||
func driftGenesisTime(s *Service, slot, delay int64) {
|
||||
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) + delay
|
||||
newTime := time.Unix(time.Now().Unix()-offset, 0)
|
||||
s.SetGenesisTime(newTime)
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(newTime.Unix()))
|
||||
}
|
||||
|
||||
func TestMissingBlobIndices(t *testing.T) {
|
||||
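driftGenesisTime shifts the tracked genesis time into the past by slot*SecondsPerSlot + delay seconds, so that "now" lands exactly delay seconds into the target slot; with delay 0 the block under test looks on time, with a larger delay it looks late for proposer boost. A worked stdlib-only version of the same arithmetic, assuming 12-second slots purely for illustration.

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // illustrative; the real value comes from params.BeaconConfig()

// driftedGenesis returns a genesis time such that, right now, we are `delay`
// seconds into `slot` - the same arithmetic as the driftGenesisTime helper.
func driftedGenesis(slot, delay int64) time.Time {
	offset := slot*secondsPerSlot + delay
	return time.Unix(time.Now().Unix()-offset, 0)
}

func currentSlot(genesis time.Time) int64 {
	return (time.Now().Unix() - genesis.Unix()) / secondsPerSlot
}

func main() {
	g := driftedGenesis(5, 0) // pretend we are at the very start of slot 5
	fmt.Println(currentSlot(g)) // 5

	late := driftedGenesis(5, 6) // 6 seconds into slot 5: late enough to matter for proposer boost
	fmt.Println(currentSlot(late), time.Now().Unix()-late.Unix()-5*secondsPerSlot) // 5 and ~6
}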
@@ -2713,16 +2710,61 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
|
||||
s, tr := minimalTestService(t, WithLCStore())
|
||||
s, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().VersionToForkEpochMap()[testVersion])*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
@@ -2753,66 +2795,517 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
t.Run("no old update", func(t *testing.T) {
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the light client update is saved
|
||||
u, err := s.lcStore.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), testVersion)
|
||||
})
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("new update is better", func(t *testing.T) {
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.lcStore.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), testVersion)
|
||||
})
|
||||
|
||||
t.Run("old update is better", func(t *testing.T) {
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// set a better sync aggregate
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.lcStore.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), testVersion)
|
||||
})
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Capella)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Capella)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Capella)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Deneb)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Deneb)
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Deneb)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Deneb)
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Deneb)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.processLightClientUpdate(cfg))
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), version.Deneb)
|
||||
})
|
||||
})
|
||||
|
||||
reset()
|
||||
}
|
||||
|
||||
func TestProcessLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)

s, tr := minimalTestService(t)
ctx := tr.ctx

t.Run("Altair", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)

s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)

err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)

cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}

require.NoError(t, s.processLightClientBootstrap(cfg))

// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)

stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Altair)
})

t.Run("Capella", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)

s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)

err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)

cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}

require.NoError(t, s.processLightClientBootstrap(cfg))

// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)

stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Capella)
})

t.Run("Deneb", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)

s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)

err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)

cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}

require.NoError(t, s.processLightClientBootstrap(cfg))

// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)

stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Deneb)
})

reset()
}

type testIsAvailableParams struct {

@@ -43,7 +43,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, s.genesisTime); err != nil {
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead than on gettAttPreState because that function gets called from UpdateHead that holds a write lock
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()

@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get head block")
log.WithError(err).Error("could not get head block")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
log.WithError(err).Error("could not update forkchoice")
}
}

@@ -179,7 +179,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
continue
}

@@ -70,7 +70,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(time.Now().Add(-1*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)))
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -183,7 +183,7 @@ func (s *Service) updateCheckpoints(
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||
log.WithError(err).Error("Could not report epoch metrics")
|
||||
log.WithError(err).Error("could not report epoch metrics")
|
||||
}
|
||||
}
|
||||
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
|
||||
@@ -283,7 +283,7 @@ func (s *Service) reportPostBlockProcessing(
|
||||
// Log block sync status.
|
||||
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
justified := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
}
|
||||
// Log payload data
|
||||
@@ -300,30 +300,15 @@ func (s *Service) reportPostBlockProcessing(
|
||||
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
|
||||
// Send finalization event
|
||||
go func() {
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
|
||||
// Insert finalized deposits into finalized deposit trie
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
// Save a light client bootstrap for the finalized checkpoint
|
||||
go func() {
|
||||
err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not save light client bootstrap by block root")
|
||||
} else {
|
||||
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
|
||||
|
||||
@@ -8,10 +8,7 @@ import (
|
||||
blockchainTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -21,7 +18,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -342,7 +338,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
s.SetGenesisTime(time.Now())
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
|
||||
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
|
||||
@@ -352,9 +348,8 @@ func TestHandleCaches_EnablingLargeSize(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
helpers.ClearCache()
|
||||
require.NoError(t, s.handleCaches())
|
||||
assert.LogsContain(t, hook, "Expanding committee cache size")
|
||||
}
|
||||
@@ -567,48 +562,3 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessLightClientBootstrap(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
|
||||
s, tr := minimalTestService(t, WithLCStore())
|
||||
ctx := tr.ctx
|
||||
|
||||
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
|
||||
finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))
|
||||
|
||||
cp := l.AttestedState.FinalizedCheckpoint()
|
||||
require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))
|
||||
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
|
||||
|
||||
sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sss)
|
||||
|
||||
s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
|
||||
|
||||
// wait for the goroutine to finish processing
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Check that the light client bootstrap is saved
|
||||
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, btst, b)
|
||||
require.Equal(t, b.Version(), testVersion)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,7 +69,6 @@ type Service struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
startWaitingDataColumnSidecars chan bool // for testing purposes only
|
||||
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -181,15 +180,14 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: ¤tlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: ¤tlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -268,7 +266,7 @@ func (s *Service) Status() error {
|
||||
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
|
||||
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = saved.GenesisTime()
|
||||
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
|
||||
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
|
||||
|
||||
originRoot, err := s.originRootFromSavedState(s.ctx)
|
||||
@@ -373,7 +371,7 @@ func (s *Service) startFromExecutionChain() error {
|
||||
if e.Type == statefeed.ChainStarted {
|
||||
data, ok := e.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("Event data is not type *statefeed.ChainStartedData")
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
|
||||
@@ -410,7 +408,7 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
|
||||
|
||||
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
|
||||
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
|
||||
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -424,7 +422,7 @@ func (s *Service) initializeBeaconChain(
|
||||
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
|
||||
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
@@ -485,7 +483,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
if err := s.setHead(&head{
|
||||
genesisBlkRoot,
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -16,7 +17,10 @@ func init() {
|
||||
}
|
||||
|
||||
func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -127,7 +127,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetGenesisTime(gt))
|
||||
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
@@ -164,7 +164,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetGenesisTime(gt))
|
||||
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
@@ -238,7 +238,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetGenesisTime(gt))
|
||||
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
@@ -345,8 +345,12 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
s := testServiceWithDB(t)
|
||||
fc := doublylinkedtree.New()
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
|
||||
}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 1
|
||||
r, err := blk.HashTreeRoot()
|
||||
@@ -367,7 +371,10 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
|
||||
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -384,16 +391,22 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
}
|
||||
bb := util.NewBeaconBlock()
|
||||
r, err := bb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
|
||||
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
|
||||
require.NoError(t, s.Stop())
|
||||
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
|
||||
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
|
||||
}
|
||||
|
||||
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
|
||||
@@ -415,8 +428,11 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkHasBlockDB(b *testing.B) {
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
ctx := b.Context()
|
||||
s := testServiceWithDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
blk := util.NewBeaconBlock()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
@@ -432,7 +448,10 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
|
||||
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
ctx := b.Context()
|
||||
s := testServiceWithDB(b)
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
blk := util.NewBeaconBlock()
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -38,7 +38,7 @@ func (s *Service) startupHeadRoot() [32]byte {
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
|
||||
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
@@ -46,7 +46,7 @@ func (s *Service) startupHeadRoot() [32]byte {
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
|
||||
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
@@ -64,16 +64,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
|
||||
}
|
||||
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block, starting with finalized block as head")
|
||||
log.WithError(err).Error("could not get head block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
chain, err := s.buildForkchoiceChain(s.ctx, blk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
|
||||
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
@@ -170,6 +170,6 @@ func (s *Service) setupForkchoiceCheckpoints() error {
|
||||
Root: fRoot}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
|
||||
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
@@ -25,9 +24,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -87,7 +84,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
@@ -112,10 +109,8 @@ type testServiceRequirements struct {
|
||||
|
||||
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
|
||||
ctx := t.Context()
|
||||
genesis := time.Now().Add(-1 * 4 * time.Duration(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second) // Genesis was 4 epochs ago.
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New()
|
||||
fcs.SetGenesisTime(genesis)
|
||||
sg := stategen.New(beaconDB, fcs)
|
||||
notif := &mockBeaconNode{}
|
||||
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
|
||||
@@ -154,7 +149,6 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
WithP2PBroadcaster(&mockAccessor{}),
|
||||
WithLightClientStore(&lightclient.Store{}),
|
||||
WithGenesisTime(genesis),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
ojc := ðpb.Checkpoint{}
|
||||
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Could not update head")
|
||||
logrus.WithError(err).Error("could not update head")
|
||||
}
|
||||
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Could not insert node to forkchoice")
|
||||
logrus.WithError(err).Error("could not insert node to forkchoice")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -632,11 +632,6 @@ func (s *ChainService) CachedHeadRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// SafeBlockHash mocks the same method in the chain service
|
||||
func (s *ChainService) SafeBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// GetProposerHead mocks the same method in the chain service
|
||||
func (s *ChainService) GetProposerHead() [32]byte {
|
||||
if s.ForkChoiceStore != nil {
|
||||
@@ -646,7 +641,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
|
||||
}
|
||||
|
||||
// SetForkChoiceGenesisTime mocks the same method in the chain service
|
||||
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
|
||||
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
s.ForkChoiceStore.SetGenesisTime(timestamp)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package blockchain
|
||||
import (
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -15,8 +17,11 @@ import (
|
||||
)
|
||||
|
||||
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1792480
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -63,15 +68,15 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := s.cfg.BeaconDB
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
|
||||
require.NoError(t, err)
|
||||
s.cfg.WeakSubjectivityCheckpt = tt.checkpt
|
||||
s.wsVerifier = wv
|
||||
require.Equal(t, !tt.disabled, wv.enabled)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
|
||||
require.NoError(t, err)
|
||||
fcs := doublylinkedtree.New()
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
|
||||
wsVerifier: wv,
|
||||
}
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
|
||||
if tt.wantErr == nil {
|
||||
|
||||
@@ -24,7 +24,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -68,7 +68,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
log.WithError(err).Error("Failed to check builder status")
|
||||
} else {
|
||||
log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
|
||||
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
|
||||
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
|
||||
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
|
||||
}
|
||||
}
|
||||
@@ -87,7 +87,7 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
|
||||
@@ -29,7 +29,6 @@ type MockBuilderService struct {
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
@@ -47,7 +46,7 @@ func (s *MockBuilderService) Configured() bool {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
switch b.Version() {
|
||||
case version.Bellatrix:
|
||||
w, err := blocks.WrappedExecutionPayload(s.Payload)
|
||||
@@ -67,16 +66,6 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
}
|
||||
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
|
||||
case version.Fulu:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload for fulu")
|
||||
}
|
||||
// For Fulu, return BlobsBundleV2 if available, otherwise regular BlobsBundle
|
||||
if s.BlobBundleV2 != nil {
|
||||
return w, s.BlobBundleV2, s.ErrSubmitBlindedBlock
|
||||
}
|
||||
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
|
||||
default:
|
||||
return nil, nil, errors.New("unknown block version for mocking")
|
||||
}
|
||||
|
||||
2
beacon-chain/cache/sync_committee.go
vendored
@@ -178,7 +178,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("Cache rotated during async committee update operation - abandoning cache update")
log.Warn("cache rotated during async committee update operation - abandoning cache update")
return nil
}

@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
|
||||
valIdx: 2,
|
||||
st: genState(1),
|
||||
want: 0,
|
||||
errString: "index 2 out of bounds",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 32eth",
|
||||
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
|
||||
valIdx: 2,
|
||||
activeBalance: 1,
|
||||
want: 0,
|
||||
errString: "index 2 out of bounds",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 1",
|
||||
|
||||
@@ -217,15 +217,15 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
|
||||
// ValidateSyncMessageTime validates sync message to ensure that the provided slot is valid.
|
||||
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
|
||||
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
|
||||
if err := slots.ValidateClock(slot, genesisTime); err != nil {
|
||||
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
}
|
||||
messageTime, err := slots.StartTime(genesisTime, slot)
|
||||
messageTime, err := slots.ToTime(uint64(genesisTime.Unix()), slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(genesisTime)
|
||||
slotStartTime, err := slots.StartTime(genesisTime, currentSlot)
|
||||
currentSlot := slots.Since(genesisTime)
|
||||
slotStartTime, err := slots.ToTime(uint64(genesisTime.Unix()), currentSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
|
||||
|
||||
numValidators := state.NumValidators()
|
||||
s := ðpb.BeaconStateAltair{
|
||||
GenesisTime: uint64(state.GenesisTime().Unix()),
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -119,7 +119,6 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
_, err = Eth1DataHasEnoughSupport(s, eth1data)
|
||||
_ = err
|
||||
fuzz.FreeMemory(i)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -320,7 +319,6 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = VerifyDeposit(s, deposit)
|
||||
_ = err
|
||||
fuzz.FreeMemory(i)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -384,6 +382,5 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
_ = err
|
||||
err = VerifyExitAndSignature(val, s, ve)
|
||||
_ = err
|
||||
fuzz.FreeMemory(i)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,7 +162,7 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
|
||||
if !bytes.Equal(payload.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
t, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -501,7 +501,7 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -551,7 +551,7 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -627,7 +627,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -697,7 +697,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -43,7 +43,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateCapella{
|
||||
GenesisTime: uint64(state.GenesisTime().Unix()),
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -59,7 +59,7 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateDeneb{
|
||||
GenesisTime: uint64(state.GenesisTime().Unix()),
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -194,11 +194,11 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if IsValidSwitchToCompoundingRequest(st, cr) {
|
||||
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
|
||||
if !ok {
|
||||
log.Error("Failed to find source validator index")
|
||||
log.Error("failed to find source validator index")
|
||||
continue
|
||||
}
|
||||
if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
|
||||
log.WithError(err).Error("Failed to switch to compounding validator")
|
||||
log.WithError(err).Error("failed to switch to compounding validator")
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -280,7 +280,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
}
|
||||
bal, err := st.PendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
log.WithError(err).Error("failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if bal > 0 {
|
||||
@@ -290,7 +290,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
// Initiate the exit of the source validator.
|
||||
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to compute consolidation epoch")
|
||||
log.WithError(err).Error("failed to compute consolidation epoch")
|
||||
continue
|
||||
}
|
||||
srcV.ExitEpoch = exitEpoch
|
||||
|
||||
@@ -208,7 +208,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateElectra{
|
||||
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -36,7 +36,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateBellatrix{
|
||||
GenesisTime: uint64(state.GenesisTime().Unix()),
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -111,7 +111,7 @@ func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.Be
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateFulu{
|
||||
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -36,6 +36,7 @@ go_library(
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
)
|
||||
|
||||
@@ -124,18 +125,18 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
|
||||
// valid_attestation_slot = 101
|
||||
//
|
||||
// In the attestation must be within the range of 95 to 102 in the example above.
|
||||
func ValidateAttestationTime(attSlot primitives.Slot, genesis time.Time, clockDisparity time.Duration) error {
|
||||
attTime, err := slots.StartTime(genesis, attSlot)
|
||||
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
|
||||
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(genesis)
|
||||
currentSlot := slots.Since(genesisTime)
|
||||
|
||||
// When receiving an attestation, it can be from the future.
|
||||
// so the upper bounds is set to now + clockDisparity(SECONDS_PER_SLOT * 2).
|
||||
// But when sending an attestation, it should not be in future slot.
|
||||
// so the upper bounds is set to now + clockDisparity(MAXIMUM_GOSSIP_CLOCK_DISPARITY).
|
||||
upperBounds := time.Now().Add(clockDisparity)
|
||||
upperBounds := prysmTime.Now().Add(clockDisparity)
|
||||
|
||||
// An attestation cannot be older than the current slot - attestation propagation slot range
|
||||
// with a minor tolerance for peer clock disparity.
|
||||
@@ -143,7 +144,7 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesis time.Time, clockDi
|
||||
if currentSlot > params.BeaconConfig().AttestationPropagationSlotRange {
|
||||
lowerBoundsSlot = currentSlot - params.BeaconConfig().AttestationPropagationSlotRange
|
||||
}
|
||||
lowerTime, err := slots.StartTime(genesis, lowerBoundsSlot)
|
||||
lowerTime, err := slots.ToTime(uint64(genesisTime.Unix()), lowerBoundsSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -186,7 +187,7 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesis time.Time, clockDi
|
||||
// VerifyCheckpointEpoch is within current epoch and previous epoch
|
||||
// with respect to current time. Returns true if it's within, false if it's not.
|
||||
func VerifyCheckpointEpoch(c *ethpb.Checkpoint, genesis time.Time) bool {
|
||||
currentSlot := slots.CurrentSlot(genesis)
|
||||
currentSlot := slots.CurrentSlot(uint64(genesis.Unix()))
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
var prevEpoch primitives.Epoch
|
||||
|
||||
@@ -217,10 +217,10 @@ func Test_ValidateAttestationTime(t *testing.T) {
|
||||
{
|
||||
name: "attestation.slot is well beyond current slot",
|
||||
args: args{
|
||||
attSlot: 1024,
|
||||
genesisTime: time.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
attSlot: 1 << 32,
|
||||
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
},
|
||||
wantedErr: "attestation slot 1024 not within attestation propagation range of 0 to 15 (current slot)",
|
||||
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -669,11 +669,11 @@ func ComputeCommittee(
|
||||
// InitializeProposerLookahead computes the list of the proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
|
||||
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
|
||||
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
indices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
|
||||
indices, err := ActiveValidatorIndices(ctx, state, epoch+i)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch+i)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute proposer indices")
|
||||
|
||||
@@ -916,46 +916,3 @@ func TestAssignmentForValidator(t *testing.T) {
|
||||
require.DeepEqual(t, &helpers.LiteAssignment{}, got)
|
||||
})
|
||||
}
|
||||
|
||||
// Regression for #15450
|
||||
func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 128)
|
||||
// Set some validators to activate in epoch 3 instead of 0
|
||||
validators := state.Validators()
|
||||
for i := 64; i < 128; i++ {
|
||||
validators[i].ActivationEpoch = 3
|
||||
}
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(64)) // epoch 2
|
||||
epoch := slots.ToEpoch(state.Slot())
|
||||
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
|
||||
require.NoError(t, err)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
|
||||
targetEpoch := epoch + epochOffset
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedProposers, err := helpers.PrecomputeProposerIndices(state, activeIndices, targetEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
startIdx := int(epochOffset) * slotsPerEpoch
|
||||
endIdx := startIdx + slotsPerEpoch
|
||||
actualProposers := proposerLookahead[startIdx:endIdx]
|
||||
|
||||
expectedUint64 := make([]uint64, len(expectedProposers))
|
||||
for i, proposer := range expectedProposers {
|
||||
expectedUint64[i] = uint64(proposer)
|
||||
}
|
||||
|
||||
// This assertion would fail with the original bug:
|
||||
for i, expected := range expectedUint64 {
|
||||
require.Equal(t, expected, actualProposers[i],
|
||||
"Proposer index mismatch at slot %d in epoch %d", i, targetEpoch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,7 +100,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "index 12390192 out of bounds", err)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -187,7 +187,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "index 120391029 out of bounds", err)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -287,7 +287,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "index 129301923 out of bounds", err)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -374,7 +374,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "index 21093019 out of bounds", err)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -561,6 +561,17 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},*/
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "state has nil validator slice",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -10,7 +10,6 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -26,7 +25,6 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,146 +1,20 @@
|
||||
package light_client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
|
||||
|
||||
type Store struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
beaconDB iface.HeadAccessDatabase
|
||||
lastFinalityUpdate interfaces.LightClientFinalityUpdate
|
||||
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
|
||||
}
|
||||
|
||||
func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
|
||||
return &Store{
|
||||
beaconDB: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Fetch the light client bootstrap from the database
|
||||
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bootstrap == nil { // not found
|
||||
return nil, ErrLightClientBootstrapNotFound
|
||||
}
|
||||
|
||||
return bootstrap, nil
|
||||
}
|
||||
|
||||
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
blk, err := s.beaconDB.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
|
||||
}
|
||||
if blk == nil {
|
||||
return errors.Errorf("failed to fetch block for root %x", blockRoot)
|
||||
}
|
||||
|
||||
state, err := s.beaconDB.State(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
|
||||
}
|
||||
if state == nil {
|
||||
return errors.Errorf("failed to fetch state for block root %x", blockRoot)
|
||||
}
|
||||
|
||||
bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
|
||||
}
|
||||
|
||||
// Save the light client bootstrap to the database
|
||||
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Fetch the light client updatesMap from the database
|
||||
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var updates []interfaces.LightClientUpdate
|
||||
for i := startPeriod; i <= endPeriod; i++ {
|
||||
update, ok := updatesMap[i]
|
||||
if !ok {
|
||||
// Only return the first contiguous range of updates
|
||||
break
|
||||
}
|
||||
updates = append(updates, update)
|
||||
}
|
||||
|
||||
return updates, nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Fetch the light client update for the given period from the database
|
||||
update, err := s.beaconDB.LightClientUpdate(ctx, period)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return update, nil
|
||||
}
|
||||
|
||||
func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
	if err != nil {
		return errors.Wrap(err, "could not get current light client update")
	}

	if oldUpdate == nil {
		if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
			return errors.Wrap(err, "could not save light client update")
		}
		log.WithField("period", period).Debug("Saved new light client update")
		return nil
	}

	isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
	if err != nil {
		return errors.Wrap(err, "could not compare light client updates")
	}

	if isNewUpdateBetter {
		if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
			return errors.Wrap(err, "could not save light client update")
		}
		log.WithField("period", period).Debug("Saved new light client update")
		return nil
	}
	log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
	return nil
}

func NewLightClientStore() *Store {
	return &Store{}
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {

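A minimal usage sketch for the update-saving path above, assuming IsBetterUpdate implements the light client spec's update ranking as the surrounding code suggests; the period number and variable names are invented for the example.

	// Persist an update for sync-committee period 42; a strictly worse update
	// is skipped without an error, leaving the stored one in place.
	if err := store.SaveLightClientUpdate(ctx, 42, newUpdate); err != nil {
		log.WithError(err).Error("Could not persist light client update")
	}
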
@@ -12,7 +12,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/runtime/logging"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
	errors "github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

@@ -122,7 +122,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
log.WithFields(lf).WithFields(logging.BlockFieldsFromBlob(sidecars[0])).
|
||||
Debug("Invalid BlobSidecars received")
|
||||
Debug("invalid BlobSidecars received")
|
||||
}
|
||||
return errors.Wrapf(err, "invalid BlobSidecars received for block %#x", root)
|
||||
}
|
||||
|
||||
@@ -255,7 +255,7 @@ func pruneBefore(before primitives.Epoch, l fsLayout) (map[primitives.Epoch]*pru
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Error("Encountered unhandled error during pruning")
|
||||
log.WithError(err).Error("encountered unhandled error during pruning")
|
||||
return nil, errors.Wrap(errPruneFailed, err.Error())
|
||||
}
|
||||
if ident.epoch >= before {
|
||||
|
||||
@@ -564,6 +564,14 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
|
||||
cfg.EpochsPerSyncCommitteePeriod = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
versionToForkEpoch := map[int]primitives.Epoch{
|
||||
version.Altair: params.BeaconConfig().AltairForkEpoch,
|
||||
version.Bellatrix: params.BeaconConfig().BellatrixForkEpoch,
|
||||
version.Capella: params.BeaconConfig().CapellaForkEpoch,
|
||||
version.Deneb: params.BeaconConfig().DenebForkEpoch,
|
||||
version.Electra: params.BeaconConfig().ElectraForkEpoch,
|
||||
}
|
||||
|
||||
db := setupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -575,7 +583,7 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
|
||||
|
||||
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().VersionToForkEpochMap()[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(versionToForkEpoch[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
|
||||
|
||||
@@ -60,13 +60,13 @@ type Service struct {
|
||||
initSyncWaiter func() error
|
||||
}
|
||||
|
||||
func New(ctx context.Context, db iface.Database, genesisTime time.Time, initSyncWaiter, backfillWaiter func() error, opts ...ServiceOption) (*Service, error) {
|
||||
func New(ctx context.Context, db iface.Database, genesisTime uint64, initSyncWaiter, backfillWaiter func() error, opts ...ServiceOption) (*Service, error) {
|
||||
p := &Service{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
ps: pruneStartSlotFunc(helpers.MinEpochsForBlockRequests() + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot.
|
||||
done: make(chan struct{}),
|
||||
slotTicker: slots.NewSlotTicker(slots.UnsafeStartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
|
||||
slotTicker: slots.NewSlotTicker(slots.StartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
|
||||
initSyncWaiter: initSyncWaiter,
|
||||
backfillWaiter: backfillWaiter,
|
||||
}
|
||||
|
||||
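The hunk above changes the pruner constructor to take the genesis time as a unix timestamp (uint64) instead of a time.Time. A sketch of an adapted call site, assuming the caller previously held a time.Time value; the names are illustrative.

	// Before: New(ctx, db, genesisTime, initSyncWaiter, backfillWaiter, opts...)
	// After: pass the unix timestamp in seconds.
	p, err := New(ctx, db, uint64(genesisTime.Unix()), initSyncWaiter, backfillWaiter, opts...)
	if err != nil {
		return err
	}
	go p.Start()
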
@@ -62,7 +62,7 @@ func TestPruner_PruningConditions(t *testing.T) {
|
||||
if !tt.backfillCompleted {
|
||||
backfillWaiter = waiter
|
||||
}
|
||||
p, err := New(ctx, beaconDB, time.Now(), initSyncWaiter, backfillWaiter, WithSlotTicker(slotTicker))
|
||||
p, err := New(ctx, beaconDB, uint64(time.Now().Unix()), initSyncWaiter, backfillWaiter, WithSlotTicker(slotTicker))
|
||||
require.NoError(t, err)
|
||||
|
||||
go p.Start()
|
||||
@@ -100,7 +100,7 @@ func TestPruner_PruneSuccess(t *testing.T) {
|
||||
p, err := New(
|
||||
ctx,
|
||||
beaconDB,
|
||||
time.Now(),
|
||||
uint64(time.Now().Unix()),
|
||||
nil,
|
||||
nil,
|
||||
WithSlotTicker(slotTicker),
|
||||
|
||||
@@ -620,7 +620,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
blobIndex := kzgIndexes[i]
|
||||
proof, err := blocks.MerkleProofKZGCommitment(blockBody, blobIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("index", blobIndex).Error("Failed to get Merkle proof for KZG commitment")
|
||||
log.WithError(err).WithField("index", blobIndex).Error("failed to get Merkle proof for KZG commitment")
|
||||
continue
|
||||
}
|
||||
sidecar := ðpb.BlobSidecar{
|
||||
@@ -634,14 +634,14 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
|
||||
roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("index", blobIndex).Error("Failed to create RO blob with root")
|
||||
log.WithError(err).WithField("index", blobIndex).Error("failed to create RO blob with root")
|
||||
continue
|
||||
}
|
||||
|
||||
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
|
||||
verifiedBlob, err := v.VerifiedROBlob()
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("index", blobIndex).Error("Failed to verify RO blob")
|
||||
log.WithError(err).WithField("index", blobIndex).Error("failed to verify RO blob")
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -682,7 +682,7 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
log.Debug("No blobs returned from execution client")
|
||||
log.Debug("No blobs returned from EL")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -715,7 +715,7 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
log.Debug("Data columns successfully reconstructed from the execution client")
|
||||
log.Debug("Data columns successfully reconstructed from the execution client.")
|
||||
|
||||
return verifiedRODataColumns, nil
|
||||
}
|
||||
|
||||
@@ -248,14 +248,14 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
|
||||
for i := range s.chainStartData.ChainstartDeposits {
|
||||
proof, err := s.depositTrie.MerkleProof(i)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Unable to generate deposit proof")
|
||||
log.WithError(err).Error("unable to generate deposit proof")
|
||||
}
|
||||
s.chainStartData.ChainstartDeposits[i].Proof = proof
|
||||
}
|
||||
|
||||
root, err := s.depositTrie.HashTreeRoot()
|
||||
if err != nil { // This should never happen.
|
||||
log.WithError(err).Error("Unable to determine root of deposit trie, aborting chain start")
|
||||
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
|
||||
return
|
||||
}
|
||||
s.chainStartData.Eth1Data = ðpb.Eth1Data{
|
||||
|
||||
@@ -557,8 +557,8 @@ func (s *Service) initPOWService() {
|
||||
}
|
||||
}
|
||||
// Handle edge case with embedded genesis state by fetching genesis header to determine
|
||||
// its height only if the deposit requests have not started yet (Pre Pectra EIP-6110 behavior).
|
||||
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 && !s.depositRequestsStarted {
|
||||
// its height.
|
||||
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
|
||||
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
|
||||
genBlock := s.chainStartData.GenesisBlock
|
||||
// In the event our provided chainstart data references a non-existent block hash,
|
||||
@@ -711,7 +711,7 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
|
||||
// Determines the earliest voting block from which to start caching all our previous headers from.
|
||||
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
|
||||
genesisTime := s.chainStartData.GenesisTime
|
||||
currSlot := slots.CurrentSlot(time.Unix(int64(genesisTime), 0)) // lint:ignore uintcast -- Genesis time will never exceed int64 in seconds.
|
||||
currSlot := slots.CurrentSlot(genesisTime)
|
||||
|
||||
// In the event genesis has not occurred yet, we just request to go back follow_distance blocks.
|
||||
if genesisTime == 0 || currSlot == 0 {
|
||||
@@ -837,7 +837,7 @@ func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData,
|
||||
}
|
||||
s.chainStartData = ðpb.ChainStartData{
|
||||
Chainstarted: true,
|
||||
GenesisTime: uint64(genState.GenesisTime().Unix()),
|
||||
GenesisTime: genState.GenesisTime(),
|
||||
GenesisBlock: 0,
|
||||
Eth1Data: genState.Eth1Data(),
|
||||
ChainstartDeposits: make([]*ethpb.Deposit, 0),
|
||||
|
||||
@@ -29,7 +29,6 @@ func New() *ForkChoice {
|
||||
unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
safeHeadRoot: [32]byte{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
@@ -71,115 +70,11 @@ func (f *ForkChoice) Head(
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute seconds since slot start")
|
||||
}
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: jc.Epoch,
|
||||
finalizedEpoch: fc.Epoch,
|
||||
currentSlot: currentSlot,
|
||||
secondsSinceSlotStart: secondsSinceSlotStart,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
pbRoot: f.store.proposerBoostRoot,
|
||||
pbValue: f.store.previousProposerBoostScore,
|
||||
}); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "Could not update best descendant")
|
||||
}
|
||||
h, err := f.store.head(ctx)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "Could not get head")
|
||||
}
|
||||
|
||||
// Return early if the head is not the highest received node before updating the safe head.
|
||||
if f.store.highestReceivedNode.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
return h, nil
|
||||
}
|
||||
|
||||
if err := f.updateSafeHead(ctx); err != nil {
|
||||
log.WithError(err).Error("Could not update safe head")
|
||||
}
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// updateSafeHead updates the safe head in the fork choice store.
|
||||
func (f *ForkChoice) updateSafeHead(
|
||||
ctx context.Context,
|
||||
) error {
|
||||
oldSafeHeadRoot := f.store.safeHeadRoot
|
||||
newSafeHeadRoot, err := f.store.safeHead(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Could not get safe head")
|
||||
}
|
||||
|
||||
// If the safe head has not changed, return early.
|
||||
if oldSafeHeadRoot == newSafeHeadRoot {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update safe head
|
||||
f.store.safeHeadRoot = newSafeHeadRoot
|
||||
|
||||
f.logSafeHead(ctx, newSafeHeadRoot, oldSafeHeadRoot)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ForkChoice) logSafeHead(ctx context.Context, newSafeHeadRoot [32]byte, oldSafeHeadRoot [32]byte) {
|
||||
newSafeHeadNode, ok := f.store.nodeByRoot[newSafeHeadRoot]
|
||||
if !ok || newSafeHeadNode == nil {
|
||||
log.WithError(ErrNilNode).Error("Could not find new safe head node")
|
||||
return
|
||||
}
|
||||
newSafeHeadSlot := newSafeHeadNode.slot
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute seconds since slot start")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentSlot": fmt.Sprintf("%d", currentSlot),
|
||||
"sinceSlotStartTime": fmt.Sprintf("%d", secondsSinceSlotStart.Milliseconds()),
|
||||
"newSafeHeadSlot": fmt.Sprintf("%d", newSafeHeadSlot),
|
||||
"newSafeHeadRoot": fmt.Sprintf("%#x", newSafeHeadRoot),
|
||||
"weight": fmt.Sprintf("%d", newSafeHeadNode.weight),
|
||||
}).Info("Safe head has changed")
|
||||
|
||||
// Update metrics.
|
||||
safeHeadSlotNumber.Set(float64(newSafeHeadSlot))
|
||||
|
||||
// Check if the safe head reorged.
|
||||
commonRoot, forkSlot, err := f.CommonAncestor(ctx, oldSafeHeadRoot, newSafeHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not find common ancestor root")
|
||||
return
|
||||
}
|
||||
|
||||
// The safe head has reorged. This is bad!
|
||||
if oldSafeHeadRoot != [32]byte{} && commonRoot != oldSafeHeadRoot {
|
||||
oldSafeHeadNode, ok := f.store.nodeByRoot[oldSafeHeadRoot]
|
||||
if !ok || oldSafeHeadNode == nil {
|
||||
log.WithError(ErrNilNode).Error("Could not find old safe head node")
|
||||
return
|
||||
}
|
||||
oldSafeHeadSlot := oldSafeHeadNode.slot
|
||||
dis := oldSafeHeadSlot + newSafeHeadSlot - 2*forkSlot
|
||||
dep := max(uint64(oldSafeHeadSlot-forkSlot), uint64(newSafeHeadSlot-forkSlot))
|
||||
log.WithFields(logrus.Fields{
|
||||
"oldSafeHeadSlot": fmt.Sprintf("%d", oldSafeHeadSlot),
|
||||
"oldSafeHeadRoot": fmt.Sprintf("%#x", oldSafeHeadRoot),
|
||||
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
|
||||
"distance": dis,
|
||||
"depth": dep,
|
||||
}).Error("Safe head reorg occurred")
|
||||
|
||||
safeHeadReorgDistance.Observe(float64(dis))
|
||||
safeHeadReorgDepth.Observe(float64(dep))
|
||||
safeHeadReorgCount.Inc()
|
||||
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
|
||||
}
|
||||
return f.store.head(ctx)
|
||||
}
|
||||
|
||||
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
|
||||
@@ -232,7 +127,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
|
||||
_, remErr := f.store.removeNode(ctx, node)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("Could not remove node")
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
}
|
||||
return errors.Wrap(err, "could not update checkpoints")
|
||||
}
|
||||
@@ -591,8 +486,8 @@ func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.B
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesisTime tracked by forkchoice
|
||||
func (f *ForkChoice) SetGenesisTime(genesis time.Time) {
|
||||
f.store.genesisTime = genesis.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
|
||||
f.store.genesisTime = genesisTime
|
||||
}
|
||||
|
||||
// SetOriginRoot sets the genesis block root
|
||||
@@ -642,23 +537,6 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// SafeBlockHash returns the hash of the payload at the safe head
|
||||
func (f *ForkChoice) SafeBlockHash() [32]byte {
|
||||
switch params.BeaconConfig().SafeBlockAlgorithm {
|
||||
case "justified":
|
||||
return f.JustifiedPayloadBlockHash()
|
||||
case "fast-confirmation":
|
||||
safeHeadRoot := f.store.safeHeadRoot
|
||||
node, ok := f.store.nodeByRoot[safeHeadRoot]
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
default:
|
||||
return f.UnrealizedJustifiedPayloadBlockHash()
|
||||
}
|
||||
}
|
||||
|
||||
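For context on the SafeBlockHash switch shown above: its return value is what the node reports to the execution client as the safe block hash. A short summary of the three behaviours, stated as a sketch of the hunk's logic rather than the current implementation, using only names that appear in the hunk:

	// "justified":         safe hash = payload of the justified checkpoint block
	// "fast-confirmation": safe hash = payload of the tracked safe head (zero if unknown)
	// anything else:       safe hash = payload of the unrealized justified checkpoint block
	safeHash := f.SafeBlockHash()
	_ = safeHash // forwarded to the execution layer on the next forkchoice update
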
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
|
||||
jc := ðpb.Checkpoint{
|
||||
@@ -692,7 +570,6 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
|
||||
resp := &forkchoice2.Dump{
|
||||
JustifiedCheckpoint: jc,
|
||||
UnrealizedJustifiedCheckpoint: ujc,
|
||||
SafeHeadRoot: f.store.safeHeadRoot[:],
|
||||
FinalizedCheckpoint: fc,
|
||||
UnrealizedFinalizedCheckpoint: ufc,
|
||||
ProposerBoostRoot: f.store.proposerBoostRoot[:],
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
@@ -19,7 +20,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
)
|
||||
|
||||
// prepareForkchoiceState prepares a beacon State with the given data to mock
|
||||
@@ -230,13 +230,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
|
||||
|
||||
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 6,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
|
||||
|
||||
r1 := [32]byte{'1'}
|
||||
@@ -697,7 +691,7 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) {
|
||||
fcs := setup(tt.justified.Epoch, tt.finalized.Epoch)
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
driftGenesisTime(fcs, tt.currentSlot, 0)
|
||||
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
|
||||
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
|
||||
@@ -930,85 +924,3 @@ func TestForkChoice_CleanupInserting(t *testing.T) {
|
||||
require.NotNil(t, f.InsertNode(ctx, st, roblock))
|
||||
require.Equal(t, false, f.HasNode(roblock.Root()))
|
||||
}
|
||||
|
||||
func TestForkChoiceSafeHead(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
balances := []uint64{32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
|
||||
f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
|
||||
return balances, nil
|
||||
}
|
||||
require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
|
||||
require.Equal(t, uint64(len(balances)), f.numActiveValidators)
|
||||
require.Equal(t, uint64(10), f.store.committeeWeight)
|
||||
require.DeepEqual(t, balances, f.justifiedBalances)
|
||||
proposerScoreBoost := params.BeaconConfig().ProposerScoreBoost
|
||||
require.Equal(t, uint64(40), proposerScoreBoost)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
require.Equal(t, primitives.Slot(32), slotsPerEpoch)
|
||||
|
||||
driftGenesisTime(f, primitives.Slot(11), 0)
|
||||
st, b, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, b))
|
||||
for i := 2; i < 10; i++ {
|
||||
st, b, err = prepareForkchoiceState(ctx, primitives.Slot(i), indexToHash(uint64(i)), indexToHash(uint64(i-1)), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, b))
|
||||
}
|
||||
// Add a node at slot 11 to ensure highest received node is at current slot
|
||||
st, b, err = prepareForkchoiceState(ctx, primitives.Slot(11), indexToHash(uint64(11)), indexToHash(uint64(9)), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, b))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
currentSlot primitives.Slot
|
||||
nodeBalances []uint64
|
||||
wantRoot [32]byte
|
||||
}{
|
||||
{
|
||||
name: "safeHead is head-1",
|
||||
currentSlot: primitives.Slot(11),
|
||||
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 14, 10},
|
||||
wantRoot: indexToHash(9),
|
||||
},
|
||||
{
|
||||
name: "safeHead is head-2",
|
||||
currentSlot: primitives.Slot(11),
|
||||
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
|
||||
wantRoot: indexToHash(9),
|
||||
},
|
||||
{
|
||||
name: "safeHead is head-3",
|
||||
currentSlot: primitives.Slot(11),
|
||||
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 10},
|
||||
wantRoot: indexToHash(7),
|
||||
},
|
||||
{
|
||||
name: "safeHead is justified",
|
||||
currentSlot: primitives.Slot(11),
|
||||
nodeBalances: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
wantRoot: params.BeaconConfig().ZeroHash,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
driftGenesisTime(f, tc.currentSlot, 0)
|
||||
require.Equal(t, tc.currentSlot, slots.CurrentSlot(f.store.genesisTime))
|
||||
|
||||
s := f.store
|
||||
s.nodeByRoot[params.BeaconConfig().ZeroHash].balance = tc.nodeBalances[0]
|
||||
for i := 1; i < 10; i++ {
|
||||
s.nodeByRoot[indexToHash(uint64(i))].balance = tc.nodeBalances[i]
|
||||
}
|
||||
s.nodeByRoot[indexToHash(uint64(11))].balance = tc.nodeBalances[10]
|
||||
|
||||
_, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
safeHead := f.store.safeHeadRoot
|
||||
require.Equal(t, tc.wantRoot, safeHead)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,28 +51,4 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
safeHeadSlotNumber = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "safe_head_slot",
|
||||
Help: "The slot number of the current safe head.",
|
||||
},
|
||||
)
|
||||
safeHeadReorgCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "safe_head_reorgs_total",
|
||||
Help: "Count the number of safe head reorgs",
|
||||
})
|
||||
safeHeadReorgDistance = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "safe_head_reorg_distance",
|
||||
Help: "Captures distance of safe head reorgs. Distance is defined as the number of blocks between the old safe head and the new safe head",
|
||||
Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
|
||||
},
|
||||
)
|
||||
safeHeadReorgDepth = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "safe_head_reorg_depth",
|
||||
Help: "Captures depth of safe head reorgs. Depth is defined as the number of blocks between the safe heads and the common ancestor",
|
||||
Buckets: []float64{1, 2, 4, 8, 16, 32},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -3,34 +3,20 @@ package doublylinkedtree
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v6/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessAttestationsThreshold is the amount of time after which we
// ProcessAttestationsThreshold is the number of seconds after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
const ProcessAttestationsThreshold = 10

type updateDescendantArgs struct {
|
||||
justifiedEpoch primitives.Epoch
|
||||
finalizedEpoch primitives.Epoch
|
||||
currentSlot primitives.Slot
|
||||
secondsSinceSlotStart time.Duration
|
||||
committeeWeight uint64
|
||||
pbRoot [32]byte
|
||||
pbValue uint64
|
||||
}
|
||||
|
||||
// applyWeightChanges recursively traverses a tree of nodes to update each node's total weight and
|
||||
// weight without proposer boost by summing the balance of the node and its children.
|
||||
// If the node matches a specific root (`pbRoot`), it subtracts a given boost value (`pbValue`) from the weight without boost,
|
||||
// ensuring the balance is sufficient. It also handles context cancellation and errors during recursion.
|
||||
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
@@ -50,51 +36,14 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// maxWeight computes the maximum possible voting weight for this node.
|
||||
// This function computes the maximum weight a node can contribute from its start slot to the end slot,
|
||||
// scaled by committee weight. If the range is within one epoch, it returns the number of slots times the committee weight.
|
||||
// If the range spans at least one full epoch or starts at an epoch boundary and ends in the next epoch, it returns the full epoch weight.
|
||||
// Otherwise, it prorates the weight based on the number of slots in the start and end epochs, accounting for partial epoch coverage.
|
||||
func (n *Node) maxWeight(endSlot primitives.Slot, committeeWeight uint64) uint64 {
|
||||
startSlot := n.slot
|
||||
if n.parent != nil {
|
||||
startSlot = n.parent.slot + 1
|
||||
}
|
||||
if startSlot > endSlot {
|
||||
return 0
|
||||
}
|
||||
|
||||
startEpoch := slots.ToEpoch(startSlot)
|
||||
endEpoch := slots.ToEpoch(endSlot)
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
slotSpan := uint64(endSlot - startSlot + 1)
|
||||
|
||||
if startEpoch == endEpoch {
|
||||
return committeeWeight * slotSpan
|
||||
}
|
||||
|
||||
if endEpoch > startEpoch+1 || (endEpoch == startEpoch+1 && uint64(startSlot)%slotsPerEpoch == 0) {
|
||||
return committeeWeight * slotsPerEpoch
|
||||
}
|
||||
|
||||
slotsInStartEpoch := slotsPerEpoch - (uint64(startSlot) % slotsPerEpoch)
|
||||
slotsInEndEpoch := (uint64(endSlot) % slotsPerEpoch) + 1
|
||||
|
||||
weightEnd := committeeWeight * slotsInEndEpoch
|
||||
weightStart := (committeeWeight * slotsInStartEpoch * (slotsPerEpoch - slotsInEndEpoch)) / slotsPerEpoch
|
||||
|
||||
return weightEnd + weightStart
|
||||
}
|
||||
|
||||
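A worked example of the prorated branch of maxWeight above, mirroring the arithmetic in the partial-overlap test case later in this diff (32 slots per epoch assumed):

	// startSlot = 30, endSlot = 33, committeeWeight = 4, slotsPerEpoch = 32
	slotsInStartEpoch := uint64(32 - 30%32)  // 2 slots left in the start epoch
	slotsInEndEpoch := uint64(33%32 + 1)     // 2 slots elapsed in the end epoch
	weightEnd := uint64(4) * slotsInEndEpoch // 8
	weightStart := (4 * slotsInStartEpoch * (32 - slotsInEndEpoch)) / 32 // 240/32 = 7 (integer division)
	total := weightEnd + weightStart         // 15
	_ = total
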
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children.
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, args *updateDescendantArgs) error {
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.bestDescendant = nil
|
||||
n.bestConfirmedDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -105,11 +54,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, args *updateDescendantA
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, args); err != nil {
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
currentEpoch := slots.ToEpoch(args.currentSlot)
|
||||
childLeadsToViableHead := child.leadsToViableHead(args.justifiedEpoch, currentEpoch)
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
@@ -130,33 +78,13 @@ func (n *Node) updateBestDescendant(ctx context.Context, args *updateDescendantA
|
||||
}
|
||||
}
|
||||
if hasViableDescendant {
|
||||
// This node has a viable descendant.
|
||||
if bestChild.bestDescendant == nil {
|
||||
// The best descendant is the best child.
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
// The best descendant is more than 1 hop away.
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
|
||||
if uint64(args.secondsSinceSlotStart.Seconds()) < params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot {
|
||||
prevSlot := primitives.Slot(0)
|
||||
if args.currentSlot > 1 {
|
||||
prevSlot = args.currentSlot - 1
|
||||
}
|
||||
|
||||
if bestChild.confirmed(prevSlot, args.committeeWeight, args.pbRoot, args.pbValue) {
|
||||
n.bestConfirmedDescendant = bestChild.bestConfirmedDescendant
|
||||
if n.bestConfirmedDescendant == nil {
|
||||
n.bestConfirmedDescendant = bestChild
|
||||
}
|
||||
} else {
|
||||
n.bestConfirmedDescendant = nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
n.bestDescendant = nil
|
||||
n.bestConfirmedDescendant = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -203,10 +131,10 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
// Note that genesisTime has seconds granularity, therefore we use a strict
|
||||
// inequality < here. For example a block that arrives 3.9999 seconds into the
|
||||
// slot will have secs = 3 below.
|
||||
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
votingWindow := time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot) * time.Second
|
||||
return sss < votingWindow, err
|
||||
func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
|
||||
secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
|
||||
votingWindow := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
return secs < votingWindow, err
|
||||
}
|
||||
|
||||
// arrivedAfterOrphanCheck returns whether this block was inserted after the
|
||||
@@ -214,8 +142,8 @@ func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
// Note that genesisTime has seconds granularity, therefore we use an
|
||||
// inequality >= here. For example a block that arrives 10.00001 seconds into the
|
||||
// slot will have secs = 10 below.
|
||||
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
|
||||
secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
|
||||
return secs >= ProcessAttestationsThreshold, err
|
||||
}
|
||||
|
||||
@@ -263,38 +191,3 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// confirmed returns true if the node satisfies the confirmation rule.
|
||||
func (n *Node) confirmed(slot primitives.Slot, committeeWeight uint64, pbRoot [32]byte, pbValue uint64) bool {
|
||||
if n.slot > slot {
|
||||
return false
|
||||
}
|
||||
|
||||
pbWeight := committeeWeight * params.BeaconConfig().ProposerScoreBoost / 100
|
||||
maxWeight := n.maxWeight(slot, committeeWeight)
|
||||
byzantineWeight := maxWeight * params.BeaconConfig().FastConfirmationByzantineThreshold / 100
|
||||
threshold := (maxWeight+pbWeight)/2 + byzantineWeight
|
||||
|
||||
nodeWeight := n.weight
|
||||
|
||||
var pbWeightSubtracted bool
|
||||
if n.root == pbRoot || (n.bestDescendant != nil && n.bestDescendant.root == pbRoot) {
|
||||
if nodeWeight < pbValue {
|
||||
return false
|
||||
}
|
||||
nodeWeight -= pbValue
|
||||
pbWeightSubtracted = true
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"nodeSlot": n.slot,
|
||||
"committeeWeight": committeeWeight,
|
||||
"maxWeight": maxWeight,
|
||||
"nodeWeight": nodeWeight,
|
||||
"threshold": threshold,
|
||||
"pbWeightSubtracted": pbWeightSubtracted,
|
||||
}).Info("Checking confirmation")
|
||||
|
||||
return nodeWeight > threshold
|
||||
}
|
||||
|
||||
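To make the confirmation threshold above concrete, here is the arithmetic used by the confirmation tests later in this diff (committee weight 100, proposer score boost 40%, fast-confirmation byzantine threshold 33%):

	committeeWeight := uint64(100)
	pbWeight := committeeWeight * 40 / 100                // 40
	maxWeight := committeeWeight * 2                      // node spans slots 0..1 in one epoch -> 200
	byzantineWeight := maxWeight * 33 / 100               // 66
	threshold := (maxWeight+pbWeight)/2 + byzantineWeight // 120 + 66 = 186
	// a node weight of 187 is confirmed, 186 is not (strict inequality)
	_ = threshold
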
@@ -1,9 +1,7 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/forkchoice"
|
||||
@@ -111,60 +109,12 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
s.nodeByRoot[indexToHash(2)].weight = 200
|
||||
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 2,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_BestConfirmedDescendant(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
// Insert first child node
|
||||
state1, blk1, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state1, blk1))
|
||||
|
||||
// Insert second child node
|
||||
state2, blk2, err := prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state2, blk2))
|
||||
|
||||
s := f.store
|
||||
|
||||
// Set weightWithoutBoost manually to control confirmation logic
|
||||
node1 := s.nodeByRoot[indexToHash(1)]
|
||||
node2 := s.nodeByRoot[indexToHash(2)]
|
||||
|
||||
node1.weight = 100
|
||||
node2.weight = 200
|
||||
|
||||
// Execute update
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 3,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert the correct bestConfirmedDescendant is selected
|
||||
assert.NotNil(t, s.treeRootNode.bestConfirmedDescendant, "expected bestConfirmedDescendant to be set")
|
||||
assert.Equal(t, node2, s.treeRootNode.bestConfirmedDescendant, "expected node2 to be the bestConfirmedDescendant")
|
||||
|
||||
// Additional: verify that the best descendant logic is consistent
|
||||
assert.Equal(t, node2, s.treeRootNode.bestDescendant, "expected node2 to be the bestDescendant")
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
@@ -179,13 +129,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
s.nodeByRoot[indexToHash(2)].weight = 100
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 2,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
@@ -318,7 +262,7 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
// early block
|
||||
driftGenesisTime(f, 1, time.Second)
|
||||
driftGenesisTime(f, 1, 1)
|
||||
root := [32]byte{'a'}
|
||||
f.justifiedBalances = []uint64{10}
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
|
||||
@@ -334,9 +278,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, late)
|
||||
|
||||
orphanLateBlockFirstThreshold := time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot) * time.Second
|
||||
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
// late block
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
root = [32]byte{'b'}
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -352,7 +296,7 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
require.Equal(t, false, late)
|
||||
|
||||
// very late block
|
||||
driftGenesisTime(f, 3, ProcessAttestationsThreshold+time.Second)
|
||||
driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
|
||||
root = [32]byte{'c'}
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -382,238 +326,3 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, false, late)
|
||||
}
|
||||
|
||||
func TestNode_maxWeight(t *testing.T) {
|
||||
type fields struct {
|
||||
slot primitives.Slot
|
||||
parent *Node
|
||||
}
|
||||
type args struct {
|
||||
endSlot primitives.Slot
|
||||
committeeWeight uint64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
name: "startSlot > endSlot, should return 0",
|
||||
fields: fields{
|
||||
parent: &Node{slot: 9},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 9,
|
||||
},
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "startEpoch == currentEpoch",
|
||||
fields: fields{
|
||||
parent: &Node{slot: 4},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 7,
|
||||
committeeWeight: 10,
|
||||
},
|
||||
want: 30, // (7 - 5 + 1) = 30
|
||||
},
|
||||
{
|
||||
name: "currentEpoch > startEpoch + 1",
|
||||
fields: fields{
|
||||
slot: 0,
|
||||
},
|
||||
args: args{
|
||||
endSlot: 32,
|
||||
committeeWeight: 10,
|
||||
},
|
||||
want: 320, // slotsPerEpoch * committeeWeight
|
||||
},
|
||||
{
|
||||
name: "currentEpoch == startEpoch+1 && startSlot % slotsPerEpoch == 0",
|
||||
fields: fields{
|
||||
parent: &Node{
|
||||
slot: 31,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 64,
|
||||
committeeWeight: 5,
|
||||
},
|
||||
want: 160, // slotsPerEpoch * committeeWeight
|
||||
},
|
||||
{
|
||||
name: "partial overlap between epochs",
|
||||
fields: fields{
|
||||
slot: 30,
|
||||
},
|
||||
args: args{
|
||||
endSlot: 33,
|
||||
committeeWeight: 4,
|
||||
},
|
||||
want: func() uint64 {
|
||||
startSlot := uint64(30)
|
||||
currentSlot := uint64(33)
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
slotsInStartEpoch := slotsPerEpoch - (startSlot % slotsPerEpoch) // 32 - 30 = 2
|
||||
slotsInCurrentEpoch := (currentSlot % slotsPerEpoch) + 1 // 33 % 32 + 1 = 2
|
||||
|
||||
weightStart := (4 * slotsInStartEpoch * (slotsPerEpoch - slotsInCurrentEpoch)) / slotsPerEpoch // 4 * 2 * 30 / 32 = 7 (int division)
|
||||
weightCurrent := 4 * slotsInCurrentEpoch // 4 * 2 = 8
|
||||
return weightStart + weightCurrent // 7 + 8 = 15
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &Node{
|
||||
slot: tt.fields.slot,
|
||||
parent: tt.fields.parent,
|
||||
}
|
||||
if got := n.maxWeight(tt.args.endSlot, tt.args.committeeWeight); got != tt.want {
|
||||
t.Errorf("maxWeight() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNode_confirmed(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FastConfirmationByzantineThreshold = 33
|
||||
undo, err := params.SetActiveWithUndo(cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
type fields struct {
|
||||
nodeSlot primitives.Slot
|
||||
weight uint64
|
||||
root [32]byte
|
||||
bestDescendant *Node
|
||||
}
|
||||
type args struct {
|
||||
slot primitives.Slot
|
||||
committeeWeight uint64
|
||||
pbRoot [32]byte
|
||||
pbValue uint64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "node slot > slot returns false",
|
||||
fields: fields{
|
||||
nodeSlot: 10,
|
||||
},
|
||||
args: args{
|
||||
slot: 9,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "weight without boost <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 186, // 200 committee weight, 40 pb weight, 66 byzantine weight
|
||||
bestDescendant: &Node{},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "weight without boost > threshold returns true",
|
||||
fields: fields{
|
||||
weight: 187, // 200 committee weight, 40 pb weight, 66 byzantine weight
|
||||
bestDescendant: &Node{},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot but balance < pbValue returns false",
|
||||
fields: fields{
|
||||
weight: 187, bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 100000000,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot, balance >= pbValue, adjusted weight <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 187,
|
||||
bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot (self), balance >= pbValue, adjusted weight <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 187,
|
||||
root: [32]byte{1},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot, balance >= pbValue, adjusted weight > threshold returns true",
|
||||
fields: fields{
|
||||
weight: 188,
|
||||
bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &Node{
|
||||
slot: tt.fields.nodeSlot,
|
||||
weight: tt.fields.weight,
|
||||
root: tt.fields.root,
|
||||
bestDescendant: tt.fields.bestDescendant,
|
||||
}
|
||||
if got := n.confirmed(tt.args.slot, tt.args.committeeWeight, tt.args.pbRoot, tt.args.pbValue); got != tt.want {
|
||||
t.Errorf("confirmed() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,12 +12,8 @@ import (

// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(f *ForkChoice, slot primitives.Slot, delay time.Duration) {
	genesis := time.Now()
	s := time.Duration(slot*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second
	genesis = genesis.Add(-1 * s)
	genesis = genesis.Add(-1 * delay.Abs())
	f.SetGenesisTime(genesis)
func driftGenesisTime(f *ForkChoice, slot primitives.Slot, delay uint64) {
	f.SetGenesisTime(uint64(time.Now().Unix()) - uint64(slot)*params.BeaconConfig().SecondsPerSlot - delay)
}

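A quick sanity check of the rewritten test helper above: with genesis expressed as a unix timestamp, it places genesis slot*SecondsPerSlot + delay seconds in the past, matching the old time.Time version at one-second granularity. The variable names below are illustrative.

	slotSeconds := uint64(slot) * params.BeaconConfig().SecondsPerSlot
	genesisUnix := uint64(time.Now().Unix()) - slotSeconds - delay
	// slots.CurrentSlot(genesisUnix) then lands on `slot` as long as delay < SecondsPerSlot
	_ = genesisUnix
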
// Simple, ex-ante attack mitigation using proposer boost.
|
||||
@@ -449,7 +445,7 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
slot := primitives.Slot(1)
|
||||
currentSlot := primitives.Slot(1)
|
||||
driftGenesisTime(f, currentSlot, time.Duration(params.BeaconConfig().SecondsPerSlot-1)*time.Second)
|
||||
driftGenesisTime(f, currentSlot, params.BeaconConfig().SecondsPerSlot-1)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
@@ -469,7 +465,7 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
slot := primitives.Slot(1)
|
||||
currentSlot := primitives.Slot(1)
|
||||
driftGenesisTime(f, currentSlot, time.Second)
|
||||
driftGenesisTime(f, currentSlot, 1)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -83,12 +83,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
}
|
||||
|
||||
// Return early if we are checking before 10 seconds into the slot
|
||||
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
|
||||
secs, err := slots.SecondsSinceSlotStart(head.slot, f.store.genesisTime, uint64(time.Now().Unix()))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check current slot")
|
||||
return true
|
||||
}
|
||||
if sss < ProcessAttestationsThreshold {
|
||||
if secs < ProcessAttestationsThreshold {
|
||||
return true
|
||||
}
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
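The hunk above swaps slots.SinceSlotStart (a time.Duration) for slots.SecondsSinceSlotStart (whole seconds). A sketch of the semantics this code relies on, stated as an assumption since the helper itself is not part of this diff:

	// Assumed behaviour: seconds elapsed between the start of head.slot
	// (genesisTime + slot*SecondsPerSlot) and the supplied unix time, with an
	// error if the supplied time precedes the slot start.
	secs, err := slots.SecondsSinceSlotStart(head.slot, f.store.genesisTime, uint64(time.Now().Unix()))
	if err == nil && secs < ProcessAttestationsThreshold {
		// still early in the slot: keep the current head, do not override the FCU
	}
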
@@ -110,9 +110,9 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
if head == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// Only reorg blocks from the previous slot.
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
if head.slot+1 != currentSlot {
|
||||
if head.slot+1 != slots.CurrentSlot(f.store.genesisTime) {
|
||||
return head.root
|
||||
}
|
||||
// Do not reorg on epoch boundaries
|
||||
@@ -153,12 +153,12 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
}
|
||||
|
||||
// Only reorg if we are proposing early
|
||||
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if proposing early")
|
||||
return head.root
|
||||
}
|
||||
if sss >= orphanLateBlockProposingEarly*time.Second {
|
||||
if secs >= orphanLateBlockProposingEarly {
|
||||
return head.root
|
||||
}
|
||||
return parent.root
|
||||
|
||||
@@ -2,7 +2,6 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -28,8 +27,8 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
}
|
||||
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
|
||||
|
||||
orphanLateBlockFirstThreshold := time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot) * time.Second
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blk))
|
||||
@@ -49,29 +48,29 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
t.Run("head is not from current slot", func(t *testing.T) {
|
||||
driftGenesisTime(f, 3, 0)
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
})
|
||||
t.Run("head is from epoch boundary", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
driftGenesisTime(f, params.BeaconConfig().SlotsPerEpoch-1, 0)
|
||||
f.store.headNode.slot = params.BeaconConfig().SlotsPerEpoch - 1
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
saved := f.store.headNode.timestamp
|
||||
f.store.headNode.timestamp = saved.Add(-2 * time.Second)
|
||||
f.store.headNode.timestamp = saved - 2
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
t.Run("chain not finalizing", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
f.store.headNode.slot = 97
|
||||
driftGenesisTime(f, 97, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 97, orphanLateBlockFirstThreshold+1)
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.slot = saved
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.slot
|
||||
@@ -93,11 +92,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
})
|
||||
t.Run("parent is weak late call", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.weight
|
||||
driftGenesisTime(f, 2, 11*time.Second)
|
||||
driftGenesisTime(f, 2, 11)
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.weight = saved
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
})
|
||||
t.Run("Head is strong", func(t *testing.T) {
|
||||
f.store.headNode.weight = f.store.committeeWeight
|
||||
@@ -126,7 +125,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
}
|
||||
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
|
||||
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
driftGenesisTime(f, 3, 1)
|
||||
childRoot := [32]byte{'b'}
|
||||
st, blk, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -135,9 +134,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk.Root(), headRoot)
|
||||
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
f.store.headNode.timestamp.Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot-orphanLateBlockFirstThreshold) * time.Second)
|
||||
f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
|
||||
t.Run("head is weak", func(t *testing.T) {
|
||||
require.Equal(t, parentRoot, f.GetProposerHead())
|
||||
|
||||
})
|
||||
t.Run("head is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode
|
||||
@@ -148,20 +148,19 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
t.Run("head is not from previous slot", func(t *testing.T) {
|
||||
driftGenesisTime(f, 4, 0)
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
driftGenesisTime(f, 3, 1)
|
||||
})
|
||||
t.Run("head is from epoch boundary", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
driftGenesisTime(f, params.BeaconConfig().SlotsPerEpoch, 0)
|
||||
f.store.headNode.slot = params.BeaconConfig().SlotsPerEpoch - 1
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
driftGenesisTime(f, 3, 1)
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
saved := f.store.headNode.timestamp
|
||||
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
|
||||
f.store.headNode.timestamp = headTimeStamp
|
||||
f.store.headNode.timestamp = saved - 2
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
@@ -171,7 +170,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
driftGenesisTime(f, 98, 0)
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
f.store.headNode.slot = saved
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
driftGenesisTime(f, 3, 1)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.slot
|
||||
|
||||
@@ -44,7 +44,7 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
if bestDescendant == nil {
bestDescendant = justifiedNode
}
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
s.allTipsAreInvalid = true
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
@@ -62,39 +62,6 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestDescendant.root, nil
}

// safeHead starts from justified root and then follows the best descendant links
// to find the best safe head block.
func (s *Store) safeHead(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.safeHead")
defer span.End()

if err := ctx.Err(); err != nil {
return [32]byte{}, err
}

// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
}

// If the justified node doesn't have a best confirmed descendant,
// the best node is itself.
bestConfirmedDescendant := justifiedNode.bestConfirmedDescendant
if bestConfirmedDescendant == nil {
bestConfirmedDescendant = justifiedNode
}

return bestConfirmedDescendant.root, nil
}

// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
@@ -132,7 +99,7 @@ func (s *Store) insert(ctx context.Context,
unrealizedFinalizedEpoch: finalizedEpoch,
optimistic: true,
payloadHash: payloadHash,
timestamp: time.Now(),
timestamp: uint64(time.Now().Unix()),
}

// Set the node's target checkpoint
@@ -161,36 +128,22 @@ func (s *Store) insert(ctx context.Context,
} else {
parent.children = append(parent.children, n)
// Apply proposer boost
now := time.Now()
if now.Before(s.genesisTime) {
timeNow := uint64(time.Now().Unix())
if timeNow < s.genesisTime {
return n, nil
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
if err != nil {
return nil, fmt.Errorf("could not determine time since current slot started: %w", err)
}
boostThreshold := time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot) * time.Second
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
isFirstBlock := s.proposerBoostRoot == [32]byte{}
if currentSlot == slot && sss < boostThreshold && isFirstBlock {
if currentSlot == slot && secondsIntoSlot < boostThreshold && isFirstBlock {
s.proposerBoostRoot = root
}

// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
secondsSinceSlotStart = 0
}
if err := s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: jEpoch,
finalizedEpoch: fEpoch,
currentSlot: currentSlot,
secondsSinceSlotStart: secondsSinceSlotStart,
committeeWeight: s.committeeWeight,
}); err != nil {
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
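
The proposer boost condition in the hunk above reduces to a small amount of integer arithmetic once timestamps are plain Unix seconds. A minimal, self-contained sketch of that check, assuming SecondsPerSlot = 12 and IntervalsPerSlot = 3 (the real code reads both from params.BeaconConfig() and tracks the "first block" condition via s.proposerBoostRoot):

package main

import "fmt"

const (
	secondsPerSlot   = 12 // assumed mainnet value
	intervalsPerSlot = 3  // assumed mainnet value
)

// shouldBoost mirrors the timing condition shown in the diff: boost a block only if it
// belongs to the current slot, arrived within the first interval of that slot, and no
// block has been boosted for this slot yet.
func shouldBoost(now, genesis, blockSlot uint64, alreadyBoosted bool) bool {
	if now < genesis {
		return false
	}
	secondsIntoSlot := (now - genesis) % secondsPerSlot
	currentSlot := (now - genesis) / secondsPerSlot
	boostThreshold := uint64(secondsPerSlot / intervalsPerSlot) // 4 seconds
	return currentSlot == blockSlot && secondsIntoSlot < boostThreshold && !alreadyBoosted
}

func main() {
	genesis := uint64(1_600_000_000) // arbitrary example genesis time
	fmt.Println(shouldBoost(genesis+10*secondsPerSlot+2, genesis, 10, false)) // true: 2s into slot 10
	fmt.Println(shouldBoost(genesis+10*secondsPerSlot+6, genesis, 10, false)) // false: past the 4s threshold
}
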
@@ -315,18 +268,17 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
}

// HighestReceivedBlockDelay returns the number of slots that the highest
// received block was late when receiving it. For example, a block was late by 12 slots,
// then this method is expected to return 12.
// received block was late when receiving it
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
secs, err := slots.SecondsSinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
return primitives.Slot(secs / params.BeaconConfig().SecondsPerSlot)
}

// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch

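Both variants of HighestReceivedBlockDelay above compute the same quantity: the block's lateness in whole slots, i.e. the time elapsed since the block's slot started divided by the slot duration. A small illustrative sketch of that arithmetic, assuming Unix-second timestamps and a 12-second slot, matching the "12 slots late" example in the doc comment:

package main

import "fmt"

const secondsPerSlot = 12 // assumed mainnet value

// delaySlots returns how many whole slots late a block was received:
// seconds since the block's slot started, divided by the slot duration.
func delaySlots(blockSlot, genesis, receivedAt uint64) uint64 {
	slotStart := genesis + blockSlot*secondsPerSlot
	if receivedAt < slotStart {
		return 0
	}
	return (receivedAt - slotStart) / secondsPerSlot
}

func main() {
	// A block for slot 10 received when slot 22 begins is 12 slots late.
	fmt.Println(delaySlots(10, 0, (10+12)*secondsPerSlot)) // 12
}
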
@@ -329,7 +329,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
var b [32]byte

// Make sure it doesn't underflow
f.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, b, b, 1, 1)
require.NoError(t, err)
@@ -347,7 +347,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
@@ -360,7 +360,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(2), count)
@@ -373,7 +373,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(3), count)
@@ -386,7 +386,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-98*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-98*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
@@ -400,7 +400,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
@@ -415,7 +415,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
@@ -430,7 +430,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
@@ -445,17 +445,17 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
f.SetGenesisTime(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(132), f.HighestReceivedBlockSlot())

f.SetGenesisTime(time.Now().Add(time.Duration(-134*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-134*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
f.SetGenesisTime(time.Now().Add(time.Duration(-165*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
s.genesisTime = uint64(time.Now().Add(time.Duration(-165*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(0), count)
@@ -616,124 +616,3 @@ func TestStore_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}

func TestStore_HighestReceivedBlockDelay(t *testing.T) {
f := ForkChoice{
store: &Store{
genesisTime: time.Unix(0, 0),
highestReceivedNode: &Node{
slot: 10,
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
},
},
}

require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}

func TestStore_safeHead(t *testing.T) {
genesisEpoch := params.BeaconConfig().GenesisEpoch
root1 := [32]byte{0x01}
root2 := [32]byte{0x02}
root3 := [32]byte{0x03}

tests := []struct {
name string
setupStore func() *Store
wantRoot [32]byte
expectErr bool
expectedErr error
}{
{
name: "context cancelled returns error",
setupStore: func() *Store {
return &Store{}
},
wantRoot: [32]byte{},
expectErr: true,
},
{
name: "justified root missing and not genesis returns error",
setupStore: func() *Store {
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 1,
Root: root1,
},
nodeByRoot: make(map[[32]byte]*Node),
}
},
wantRoot: [32]byte{},
expectErr: true,
},
{
name: "justified is genesis, uses tree root",
setupStore: func() *Store {
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: genesisEpoch,
Root: [32]byte{}, // zero hash
},
nodeByRoot: map[[32]byte]*Node{},
treeRootNode: &Node{root: root2},
}
},
wantRoot: root2,
expectErr: false,
},
{
name: "justified exists with no best confirmed descendant",
setupStore: func() *Store {
node := &Node{root: root1}
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 1,
Root: root1,
},
nodeByRoot: map[[32]byte]*Node{
root1: node,
},
}
},
wantRoot: root1,
expectErr: false,
},
{
name: "justified exists with best confirmed descendant",
setupStore: func() *Store {
descendant := &Node{root: root3}
node := &Node{root: root1, bestConfirmedDescendant: descendant}
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 2,
Root: root1,
},
nodeByRoot: map[[32]byte]*Node{
root1: node,
},
}
},
wantRoot: root3,
expectErr: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
store := tt.setupStore()
ctx := context.Background()
if tt.name == "context cancelled returns error" {
c, cancel := context.WithCancel(ctx)
cancel()
ctx = c
}
got, err := store.safeHead(ctx)
if (err != nil) != tt.expectErr {
t.Fatalf("expected error: %v, got: %v", tt.expectErr, err)
}
if err == nil && got != tt.wantRoot {
t.Errorf("safeHead() = %x, want %x", got, tt.wantRoot)
}
})
}
}

@@ -2,7 +2,6 @@ package doublylinkedtree

import (
"sync"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -28,7 +27,6 @@ type Store struct {
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
safeHeadRoot [fieldparams.RootLength]byte // safe head root in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
@@ -39,7 +37,7 @@ type Store struct {
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time
genesisTime uint64
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
allTipsAreInvalid bool // tracks if all tips are not viable for head
@@ -62,8 +60,7 @@ type Node struct {
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp time.Time // The timestamp when the node was inserted.
bestConfirmedDescendant *Node // bestConfirmedDescendant node of this node.
timestamp uint64 // The timestamp when the node was inserted.
}

// Vote defines an individual validator's vote.

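The Store and Node hunks above swap between two representations of the same quantity: a time.Time on one side and a uint64 of Unix seconds on the other, and the call-site changes elsewhere in this diff are the matching conversions. A minimal sketch of those conversions (the value used here is only an arbitrary example timestamp):

package main

import (
	"fmt"
	"time"
)

func main() {
	// uint64 Unix seconds -> time.Time, as in time.Unix(int64(s.genesisTime), 0).
	genesisSeconds := uint64(1_606_824_023) // example value only
	genesisTime := time.Unix(int64(genesisSeconds), 0)

	// time.Time -> uint64 Unix seconds, as in uint64(time.Now().Unix()).
	roundTripped := uint64(genesisTime.Unix())

	fmt.Println(genesisTime.UTC(), roundTripped == genesisSeconds) // prints the time and "true"
}
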
@@ -2,7 +2,6 @@ package forkchoice

import (
"context"
"time"

forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -83,7 +82,6 @@ type FastGetter interface {
DependentRoot(primitives.Epoch) ([32]byte, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
SafeBlockHash() [32]byte
Weight(root [32]byte) (uint64, error)
ParentRoot(root [32]byte) ([32]byte, error)
}
@@ -94,7 +92,7 @@ type Setter interface {
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(time.Time)
SetGenesisTime(uint64)
SetOriginRoot([32]byte)
NewSlot(context.Context, primitives.Slot) error
SetBalancesByRooter(BalancesByRooter)

@@ -100,13 +100,6 @@ func (ro *ROForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return ro.getter.UnrealizedJustifiedPayloadBlockHash()
}

// SafeBlockHash delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) SafeBlockHash() [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.SafeBlockHash()
}

// NodeCount delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) NodeCount() int {
ro.l.RLock()

@@ -27,7 +27,6 @@ const (
previousJustifiedCheckpointCalled
justifiedPayloadBlockHashCalled
unrealizedJustifiedPayloadBlockHashCalled
safeBlockHashCalled
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
@@ -108,11 +107,6 @@ func TestROLocking(t *testing.T) {
call: unrealizedJustifiedPayloadBlockHashCalled,
cb: func(g FastGetter) { g.UnrealizedJustifiedPayloadBlockHash() },
},
{
name: "safeBlockHashCalled",
call: safeBlockHashCalled,
cb: func(g FastGetter) { g.SafeBlockHash() },
},
{
name: "nodeCountCalled",
call: nodeCountCalled,
@@ -255,11 +249,6 @@ func (ro *mockROForkchoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}

func (ro *mockROForkchoice) SafeBlockHash() [32]byte {
ro.calls = append(ro.calls, safeBlockHashCalled)
return [32]byte{}
}

func (ro *mockROForkchoice) NodeCount() int {
ro.calls = append(ro.calls, nodeCountCalled)
return 0

@@ -195,32 +195,3 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}

func configureSafeBlockConfig(cliCtx *cli.Context) error {
c := params.BeaconConfig().Copy()

if cliCtx.IsSet(flags.FastConfirmationByzantineThreshold.Name) {
threshold := cliCtx.Uint64(flags.FastConfirmationByzantineThreshold.Name)
if threshold > 100 {
return fmt.Errorf("fast-confirmation-byzantine-threshold must be between 0 and 100")
}
c.FastConfirmationByzantineThreshold = cliCtx.Uint64(flags.FastConfirmationByzantineThreshold.Name)
if err := params.SetActive(c); err != nil {
return err
}
}

if cliCtx.IsSet(flags.SafeBlock.Name) {
safeBlock := cliCtx.String(flags.SafeBlock.Name)
switch safeBlock {
case "justified", "unrealized-justified", "fast-confirmation":
default:
return fmt.Errorf("invalid safe-block option: %s", safeBlock)
}
c.SafeBlockAlgorithm = safeBlock
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}

@@ -16,7 +16,6 @@ import (
"strings"
"sync"
"syscall"
"time"

"github.com/OffchainLabs/prysm/v6/api/server/httprest"
"github.com/OffchainLabs/prysm/v6/api/server/middleware"
@@ -236,7 +235,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.finalizedStateAtStartUp = nil

if features.Get().EnableLightClient {
beacon.lcStore = lightclient.NewLightClientStore(beacon.db)
beacon.lcStore = lightclient.NewLightClientStore()
}

return beacon, nil
@@ -289,10 +288,6 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure execution setting")
}

if err := configureSafeBlockConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure safe block config")
}

return nil
}

@@ -1151,7 +1146,7 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
}

func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
genesis := time.Unix(int64(params.BeaconConfig().MinGenesisTime+params.BeaconConfig().GenesisDelay), 0)
genesisTimeUnix := params.BeaconConfig().MinGenesisTime + params.BeaconConfig().GenesisDelay
var backfillService *backfill.Service
if err := b.services.FetchService(&backfillService); err != nil {
return err
@@ -1166,7 +1161,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
p, err := pruner.New(
cliCtx.Context,
b.db,
genesis,
genesisTimeUnix,
initSyncWaiter(cliCtx.Context, b.initialSyncComplete),
backfillService.WaitForCompletion,
opts...,

@@ -26,6 +26,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -57,6 +58,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

@@ -78,16 +78,16 @@ func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftO
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("Could not aggregate unaggregated attestations")
log.WithError(err).Error("could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("Nil aggregated attestation")
log.Error("nil aggregated attestation")
continue
}
if aggregated.IsAggregated() {
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
log.WithError(err).Error("Could not save aggregated attestation")
log.WithError(err).Error("could not save aggregated attestation")
continue
}
} else {

@@ -31,7 +31,7 @@ func (s *Service) prepareForkChoiceAtts() {
break
}
}
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, intervals[:])
ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
for {
select {
case slotInterval := <-ticker.C():

Some files were not shown because too many files have changed in this diff.