Compare commits

...

16 Commits

Author SHA1 Message Date
terence tsao
398c616dfb Reduce val index cache 2025-07-15 16:24:28 -07:00
terence tsao
96370a359c fix(execution): skip genesis block retrieval when EIP-6110 is active 2025-07-14 15:04:28 -07:00
Kaloyan Tanev
56e8881bc1 Rework DV aggregation selection proofs (#15156)
* Rework DV selection proofs

* Add changelog

* Expect 1 call to DomainData in DV update duties test

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-14 15:33:26 +00:00
james-prysm
78f8411ad2 move validator run slot ticker (#15479)
* moving the ticker from chain start to right before the main loop, after the wait-for-activation edge case (see the sketch after this entry)

* fixing unit test

* adding in a unit test

* adding in comment based on potuz's feedback
2025-07-11 19:39:52 +00:00
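A hedged sketch of the ordering described in the commit above: the wait-for-activation edge case is handled first, and the slot ticker is created immediately before the main loop rather than at chain start. The interface, constant, and function names below are invented for illustration and are not Prysm's actual runner API.

package runner

import (
	"context"
	"time"
)

const secondsPerSlot = 12 // assumed slot duration, for illustration only

// validator is a hypothetical stand-in for the validator client interface.
type validator interface {
	WaitForActivation(ctx context.Context) error
	ProcessSlot(ctx context.Context, slotStart time.Time)
}

func run(ctx context.Context, v validator) error {
	// No ticks can fire while the validator is still waiting for activation.
	if err := v.WaitForActivation(ctx); err != nil {
		return err
	}
	// The ticker is started right before entering the main loop.
	ticker := time.NewTicker(secondsPerSlot * time.Second)
	defer ticker.Stop()
	for {
		select {
		case tick := <-ticker.C:
			v.ProcessSlot(ctx, tick)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}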
james-prysm
83943b5dd8 validator client: removing need to call canonical head api (#15480)
* removing need to call canonical head api

* typo

* removing unneeded tests

* fixing unit tests
2025-07-10 20:25:17 +00:00
james-prysm
bc7e4f7751 switching enable to disable for duties (#15445) 2025-07-10 18:57:36 +00:00
Manu NALEPA
02f8726e46 VerifiedROBlobFromDisk and VerifiedRODataColumnFromDisk: Add tests. (#15484) 2025-07-10 16:11:12 +00:00
terence
16b567f6af Add log capitalization analyzer and apply changes (#15452)
* Add log capitalization analyzer and apply fixes across codebase

Implements a new nogo analyzer to enforce proper log message capitalization and applies the fixes to all affected log statements throughout the beacon chain, validator, and supporting components (see the sketch after this entry).

Co-Authored-By: Claude <noreply@anthropic.com>

* Radek's feedback

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-10 13:43:38 +00:00
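A minimal, hypothetical sketch of how a go/analysis-based check of this kind can be built. It is not the actual logcapitalization analyzer added in this commit; the logger-method list and diagnostic message are illustrative only.

package logcapitalization

import (
	"go/ast"
	"strings"
	"unicode"

	"golang.org/x/tools/go/analysis"
)

var Analyzer = &analysis.Analyzer{
	Name: "logcapitalization",
	Doc:  "checks that log messages start with a capital letter",
	Run:  run,
}

func run(pass *analysis.Pass) (interface{}, error) {
	for _, file := range pass.Files {
		ast.Inspect(file, func(n ast.Node) bool {
			call, ok := n.(*ast.CallExpr)
			if !ok {
				return true
			}
			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok || len(call.Args) == 0 {
				return true
			}
			// Only inspect common logrus-style entry points; a real analyzer
			// would also verify that the receiver is actually a logger.
			switch sel.Sel.Name {
			case "Info", "Warn", "Error", "Debug", "Trace", "Fatal":
			default:
				return true
			}
			lit, ok := call.Args[0].(*ast.BasicLit)
			if !ok {
				return true
			}
			msg := strings.Trim(lit.Value, "`\"")
			if msg != "" && unicode.IsLower(rune(msg[0])) {
				pass.Reportf(lit.Pos(), "log message should start with a capital letter: %s", msg)
			}
			return true
		})
	}
	return nil, nil
}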
Bastin
5c1d827335 Unify LC API 1/2 (bootstrap) (#15476)
* add versionToForkEpoch map

* Unify LC API (bootstrap)
2025-07-10 13:07:46 +00:00
Bastin
68d7df0e4f add versionToForkEpoch map (#15482) 2025-07-10 12:57:28 +00:00
kasey
288b33750d Isolate committee cache (#15478)
* always init service through NewService

* move head state cache to service struct (see the sketch after this entry)

* changelog

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-07-09 21:43:00 +00:00
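A hedged before/after sketch of the pattern this commit applies: a package-level cache shared by every Service instance (and every test in the process) versus a cache owned by each Service and created in NewService. The type and constructor names are illustrative, not Prysm's.

package blockchainsketch

// Before: one cache for the whole process, so tests and services interfere.
//
//	var syncCommitteeHeadStateCache = newHeadStateCache()
//
// After: each Service owns its cache, initialized through NewService.

type headStateCache struct {
	entries map[uint64]struct{} // placeholder payload
}

func newHeadStateCache() *headStateCache {
	return &headStateCache{entries: make(map[uint64]struct{})}
}

type Service struct {
	syncCommitteeHeadState *headStateCache
}

func NewService() *Service {
	return &Service{syncCommitteeHeadState: newHeadStateCache()}
}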
terence
f4f48d6372 Optimize BuildBlobSidecars Merkle proof computation by pre-computing subtrees (#15473)
* Optimize BuildBlobSidecars Merkle proof computation by pre-computing subtrees (see the sketch after this entry)

Co-Authored-By: Claude <noreply@anthropic.com>

* Add change log

* Fix change log

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-09 16:12:14 +00:00
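The optimization above boils down to hashing shared subtrees once and reusing them for every proof, instead of rebuilding the full tree for each sidecar. The following is a generic, hypothetical Go sketch of that idea — not Prysm's BuildBlobSidecars code — using plain SHA-256 over a power-of-two leaf set.

package main

import (
	"crypto/sha256"
	"fmt"
)

func hashPair(a, b []byte) []byte {
	h := sha256.Sum256(append(append([]byte{}, a...), b...))
	return h[:]
}

// buildLayers computes every layer of a power-of-two Merkle tree exactly once.
func buildLayers(leaves [][]byte) [][][]byte {
	layers := [][][]byte{leaves}
	for len(leaves) > 1 {
		next := make([][]byte, len(leaves)/2)
		for i := range next {
			next[i] = hashPair(leaves[2*i], leaves[2*i+1])
		}
		layers = append(layers, next)
		leaves = next
	}
	return layers
}

// proof reuses the precomputed layers, so generating proofs for every leaf
// costs one tree build plus a few sibling lookups per proof, rather than one
// full tree build per proof.
func proof(layers [][][]byte, index int) [][]byte {
	var branch [][]byte
	for _, layer := range layers[:len(layers)-1] {
		branch = append(branch, layer[index^1])
		index /= 2
	}
	return branch
}

func main() {
	leaves := [][]byte{{1}, {2}, {3}, {4}}
	layers := buildLayers(leaves)
	for i := range leaves {
		fmt.Printf("proof for leaf %d has %d siblings\n", i, len(proof(layers, i)))
	}
}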
james-prysm
f2d57f0b5f changes for safe validator shutdown and restarts on healthcheck (#15401)
* poc changes for safe validator shutdown

* simplifying health routine and adding safe shutdown after max restarts reached

* fixing health tests

* fixing tests

* changelog

* gofmt

* fixing runner

* improve how runner times out

* improvements to ux on logs

* linting

* adding in max healthcheck flag

* changelog

* Update james-prysm_safe-validator-shutdown.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/service.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing some feedback from radek

* addressing some more feedback

* fixing name based on feedback

* fixing mistake on max health checks

* conflict accidentally checked in

* Go 1.23 no longer requires stopping the ticker

* Update flags.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* wip no unit test for recursive healthy host find

* rework healthcheck

* gaz

* fixing bugs and improving logs with new monitor

* removing health tracker, fixing runner tests, and adding placeholder for monitor tests

* fixing event stream check

* linting

* adding in health monitor tests

* gaz

* improving test

* removing some log.fatals

* forgot to remove comment

* missed fatal removal

* doppelganger should exit the node safely now

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek review

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek feedback

* read up on more suggestions and making fixes to channel

* suggested updates after more reading

* reverting some of this because it froze the validator after healthcheck failed

* fully reverting

* some improvements I found during testing

* Update cmd/validator/flags/flags.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* preston's feedback

* clarifications on changelog

* converted to using an event feed instead of my own channel publishing implementation, adding relevant logs (see the sketch after this entry)

* preston log suggestion

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-09 15:39:06 +00:00
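The final bullets above describe replacing a bespoke single-consumer channel (the deleted NodeHealthTracker shown further down) with an event feed that fans health updates out to any number of subscribers. A hedged sketch of that pattern follows, using go-ethereum's event.Feed as a stand-in for Prysm's equivalent feed type; the healthMonitor type and its methods are invented for illustration.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

type healthMonitor struct {
	feed event.Feed // fans each update out to every subscriber
}

// publish sends the latest health status to all subscribers.
func (m *healthMonitor) publish(healthy bool) {
	m.feed.Send(healthy)
}

// subscribe returns a buffered channel of health updates and the subscription
// handle used to cancel it.
func (m *healthMonitor) subscribe() (chan bool, event.Subscription) {
	ch := make(chan bool, 1)
	return ch, m.feed.Subscribe(ch)
}

func main() {
	m := &healthMonitor{}
	ch, sub := m.subscribe()
	defer sub.Unsubscribe()

	go m.publish(false)
	fmt.Println("healthy:", <-ch) // prints: healthy: false
}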
Radosław Kapka
7025e50a6c Make the multi-value slice permanent (#15414)
* Remove Old State Paths

* Changelog

* Gazelle

* Lint

* Fix State Tests

* fix tests, update native state code

* move ErrOutOfBounds error from consensus types to mvslice

* fix TestStreamEvents_OperationsEvents

* add missing gc to fuzz tests

* more test fixes

* build fix

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
2025-07-08 17:45:20 +00:00
Radosław Kapka
961ea05454 Allow SSZ requests for pending deposits, partial withdrawals and consolidations (#15474) 2025-07-08 17:45:05 +00:00
terence
da5525096c Move data col reconstruction log to after reconstruction / save complete (#15475)
* Move data col reconstruction / save log to after they are done

* Rename to reconstructionDuration

* Rename to reconstructionAndSaveDuration
2025-07-08 14:49:31 +00:00
179 changed files with 3077 additions and 2863 deletions

View File

@@ -194,6 +194,7 @@ nogo(
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",

View File

@@ -1,20 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"health.go",
"interfaces.go",
"mock.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
visibility = ["//visibility:public"],
deps = ["@org_uber_go_mock//gomock:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["health_test.go"],
embed = [":go_default_library"],
deps = ["@org_uber_go_mock//gomock:go_default_library"],
)

View File

@@ -1,58 +0,0 @@
package health
import (
"context"
"sync"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node Node
sync.RWMutex
}
func NewTracker(node Node) Tracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.Lock()
defer n.Unlock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
if isStatusChanged {
// Update the health status
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
}
return newStatus
}

View File

@@ -1,110 +0,0 @@
package health
import (
"sync"
"testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(t.Context()); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(t.Context())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := NewMockHealthClient(ctrl)
n := NewTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
n.CheckHealth(t.Context())
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
n.CheckHealth(t.Context())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy(t.Context()) // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}

View File

@@ -1,13 +0,0 @@
package health
import "context"
type Tracker interface {
HealthUpdates() <-chan bool
CheckHealth(ctx context.Context) bool
Node
}
type Node interface {
IsHealthy(ctx context.Context) bool
}

View File

@@ -1,58 +0,0 @@
package health
import (
"context"
"reflect"
"sync"
"go.uber.org/mock/gomock"
)
var (
_ = Node(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}

View File

@@ -72,7 +72,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
log.WithFields(log.Fields{
"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
}).Info("Builder http request")
return nil
}
t := io.TeeReader(r.Body, b)
@@ -89,7 +89,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
log.WithFields(log.Fields{
"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
}).Info("Builder http request")
return nil
}

View File

@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
for {
select {
case <-ticker.C:
log.WithField("function", funcName).Trace("running")
log.WithField("function", funcName).Trace("Running")
f()
case <-ctx.Done():
log.WithField("function", funcName).Debug("context is closed, exiting")
log.WithField("function", funcName).Debug("Context is closed, exiting")
ticker.Stop()
return
}

View File

@@ -3,9 +3,6 @@ package blockchain
import (
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -14,10 +11,7 @@ import (
)
func TestHeadSlot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
@@ -31,11 +25,8 @@ func TestHeadSlot_DataRace(t *testing.T) {
}
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{root: [32]byte{'A'}},
}
s := testServiceWithDB(t)
s.head = &head{root: [32]byte{'A'}}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
@@ -51,13 +42,10 @@ func TestHeadRoot_DataRace(t *testing.T) {
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{block: wsb},
}
s := testServiceWithDB(t)
s.head = &head{block: wsb}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
@@ -73,10 +61,8 @@ func TestHeadBlock_DataRace(t *testing.T) {
}
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
}
s := testServiceWithDB(t)
beaconDB := s.cfg.BeaconDB
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})

View File

@@ -6,7 +6,6 @@ import (
"time"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
@@ -138,7 +137,7 @@ func TestFinalizedBlockHash(t *testing.T) {
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := t.Context()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
service := testServiceWithDB(t)
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -153,7 +152,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -200,7 +199,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{block: wsb, state: s}
received, err := c.HeadBlock(t.Context())
@@ -213,7 +212,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
headState, err := c.HeadState(t.Context())
require.NoError(t, err)
@@ -221,7 +220,8 @@ func TestHeadState_CanRetrieve(t *testing.T) {
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
c := &Service{genesisTime: time.Unix(999, 0)}
c := testServiceNoDB(t)
c.genesisTime = time.Unix(999, 0)
wanted := time.Unix(999, 0)
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}
@@ -230,7 +230,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
if !proto.Equal(c.CurrentFork(), f) {
t.Error("Received incorrect fork version")
@@ -242,7 +242,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
c := &Service{}
c := testServiceNoDB(t)
if !proto.Equal(c.CurrentFork(), f) {
t.Error("Received incorrect fork version")
}
@@ -250,7 +250,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
c := testServiceNoDB(t)
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
@@ -269,7 +269,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
if !proto.Equal(c.HeadETH1Data(), d) {
t.Error("Received incorrect eth1 data")
@@ -298,7 +298,7 @@ func TestIsCanonical_Ok(t *testing.T) {
func TestService_HeadValidatorsIndices(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{}
indices, err := c.HeadValidatorsIndices(t.Context(), 0)
@@ -313,7 +313,7 @@ func TestService_HeadValidatorsIndices(t *testing.T) {
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{}
root := c.HeadGenesisValidatorsRoot()
@@ -332,7 +332,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
func TestService_ChainHeads(t *testing.T) {
ctx := t.Context()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
c := testServiceWithDB(t)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -366,7 +366,7 @@ func TestService_ChainHeads(t *testing.T) {
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -381,7 +381,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
c.head = nil
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -396,7 +396,7 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
@@ -409,7 +409,7 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
}
func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
c.head = nil
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
@@ -431,7 +431,8 @@ func TestService_IsOptimistic(t *testing.T) {
ctx := t.Context()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
c.head = &head{root: [32]byte{'b'}}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
@@ -450,14 +451,15 @@ func TestService_IsOptimistic(t *testing.T) {
require.Equal(t, true, opt)
// If head is nil, for some reason, an error should be returned rather than panic.
c = &Service{}
c = testServiceNoDB(t)
_, err = c.IsOptimistic(ctx)
require.ErrorIs(t, err, ErrNilHead)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := t.Context()
c := &Service{genesisTime: time.Now()}
c := testServiceNoDB(t)
c.genesisTime = time.Now()
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
@@ -465,7 +467,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := t.Context()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
c.head = &head{root: [32]byte{'b'}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -481,9 +484,10 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
c.head = &head{root: [32]byte{'b'}}
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -534,9 +538,9 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -573,9 +577,9 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -593,9 +597,9 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -17,9 +16,6 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
// a higher number of fragmented indexes are reallocated to a new separate slice for
// that field.
func (s *Service) defragmentState(st state.BeaconState) {
if !features.Get().EnableExperimentalState {
return
}
startTime := time.Now()
st.Defragment()
elapsedTime := time.Since(startTime)

View File

@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
}
if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
log.WithError(err).Error("Could not save head after pruning invalid blocks")
}
log.WithFields(logrus.Fields{

View File

@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("could not compute payload attributes")
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
@@ -99,7 +99,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("could not save head")
log.WithError(err).Error("Could not save head")
}
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
@@ -114,7 +114,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
@@ -135,7 +135,7 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
log.WithError(err).Error("Could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{

View File

@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
log.WithError(err).Error("Could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),

View File

@@ -18,9 +18,6 @@ import (
"github.com/pkg/errors"
)
// Initialize the state cache for sync committees.
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
type HeadSyncCommitteeFetcher interface {
@@ -143,7 +140,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
defer mLock.Unlock()
// If a head state already exists for the requested slot, we don't need to process slots.
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
cachedState, err := s.syncCommitteeHeadState.Get(slot)
switch {
case err == nil:
syncHeadStateHit.Inc()
@@ -166,7 +163,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
return nil, err
}
syncHeadStateMiss.Inc()
err = syncCommitteeHeadStateCache.Put(slot, headState)
err = s.syncCommitteeHeadState.Put(slot, headState)
return headState, err
default:
// In the event, we encounter another error

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -15,7 +14,7 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Current period
@@ -38,7 +37,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -52,7 +51,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -66,7 +65,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -81,7 +80,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
@@ -95,7 +94,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
@@ -109,7 +108,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
@@ -122,17 +121,14 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
}
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
c := syncCommitteeHeadStateCache
t.Cleanup(func() {
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
})
s := testServiceNoDB(t)
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(100))
cachedState, err := c.Get(101)
cachedState, err := s.syncCommitteeHeadState.Get(101)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
require.Equal(t, nil, cachedState)
require.NoError(t, c.Put(101, beaconState))
cachedState, err = c.Get(101)
require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
cachedState, err = s.syncCommitteeHeadState.Get(101)
require.NoError(t, err)
require.DeepEqual(t, beaconState, cachedState)
}

View File

@@ -9,7 +9,6 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -154,14 +153,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: [32]byte{1},
}
srv := testServiceWithDB(t)
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
@@ -185,15 +179,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
t.Run("non_genesis_values", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: genesisRoot,
}
srv := testServiceWithDB(t)
srv.originBlockRoot = genesisRoot
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))

View File

@@ -8,16 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetValidators(nil))
err = reportEpochMetrics(t.Context(), s, h)
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
}
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)

View File

@@ -8,9 +8,10 @@ import (
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func testServiceOptsWithDB(t *testing.T) []Option {
func testServiceOptsWithDB(t testing.TB) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
@@ -31,3 +32,15 @@ func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
}
func testServiceNoDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsNoDB()...)
require.NoError(t, err)
return s
}
func testServiceWithDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
require.NoError(t, err)
return s
}

View File

@@ -33,6 +33,14 @@ func WithMaxGoroutines(x int) Option {
}
}
// WithLCStore for light client store access.
func WithLCStore() Option {
return func(s *Service) error {
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
return nil
}
}
// WithWeakSubjectivityCheckpoint for checkpoint sync.
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
return func(s *Service) error {

View File

@@ -329,7 +329,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
// The latest block header is from the previous epoch
r, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
// The proposer indices cache takes the target root for the previous
@@ -339,12 +339,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
}
return nil
}
@@ -562,7 +562,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
log.WithError(err).Error("Failed to wait for initial sync")
return
}
@@ -927,10 +927,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -944,7 +944,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.

View File

@@ -68,11 +68,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -226,7 +226,7 @@ func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error
if err != nil {
return errors.Wrapf(err, "could not create light client bootstrap")
}
if err := s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap); err != nil {
if err := s.lcStore.SaveLightClientBootstrap(cfg.ctx, blockRoot, bootstrap); err != nil {
return errors.Wrapf(err, "could not save light client bootstrap")
}
return nil
@@ -527,7 +527,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
// is meant to be asynchronous and run in the background rather than being
// tied to the execution of a block.
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
log.WithError(err).Error("could not migrate to cold")
log.WithError(err).Error("Could not migrate to cold")
}
}()
return nil
@@ -616,7 +616,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
log.WithError(err).Error("could not fetch finalized state")
log.WithError(err).Error("Could not fetch finalized state")
return
}
@@ -634,7 +634,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
log.WithError(err).Error("could not cast eth1 deposit index")
log.WithError(err).Error("Could not cast eth1 deposit index")
return
}
// The deposit index in the state is always the index of the next deposit
@@ -643,12 +643,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
finalizedEth1DepIdx := eth1DepositIndex - 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
0 /* Setting a zero value as we have no access to block height */); err != nil {
log.WithError(err).Error("could not insert finalized deposits")
log.WithError(err).Error("Could not insert finalized deposits")
return
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not prune deposit proofs")
log.WithError(err).Error("Could not prune deposit proofs")
}
// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.

View File

@@ -59,11 +59,8 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
cfg.TargetCommitteeSize = 8
params.OverrideBeaconConfig(cfg)
s := Service{
cfg: &config{
AttPool: kv.NewAttCaches(),
},
}
s := testServiceNoDB(t)
s.cfg.AttPool = kv.NewAttCaches()
data := &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
@@ -471,7 +468,8 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
}
func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
svc := testServiceNoDB(t)
svc.genesisTime = prysmTime.Now().Add(1 * time.Hour)
slot := svc.CurrentSlot()
require.Equal(t, primitives.Slot(0), slot, "Unexpected slot")
@@ -3196,116 +3194,47 @@ func TestProcessLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
defer reset()
s, tr := minimalTestService(t)
s, tr := minimalTestService(t, WithLCStore())
ctx := tr.ctx
t.Run("Altair", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().VersionToForkEpochMap()[testVersion])*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientBootstrap(cfg))
require.NoError(t, s.processLightClientBootstrap(cfg))
// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, currentBlockRoot)
require.NoError(t, err)
require.NotNil(t, b)
stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Altair)
})
t.Run("Capella", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientBootstrap(cfg))
// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)
stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Capella)
})
t.Run("Deneb", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientBootstrap(cfg))
// Check that the light client bootstrap is saved
b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
require.NoError(t, err)
require.NotNil(t, b)
stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Deneb)
})
reset()
stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), testVersion)
})
}
}
type testIsAvailableParams struct {

View File

@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("could not get head block")
log.WithError(err).Error("Could not get head block")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice")
log.WithError(err).Error("Could not update forkchoice")
}
}

View File

@@ -183,7 +183,7 @@ func (s *Service) updateCheckpoints(
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
log.WithError(err).Error("Could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {

View File

@@ -69,6 +69,7 @@ type Service struct {
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
}
// config options for the service.
@@ -180,14 +181,15 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
seenIndex: make(map[[32]byte][]bool),
}
srv := &Service{
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -371,7 +373,7 @@ func (s *Service) startFromExecutionChain() error {
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
log.Error("Event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
@@ -408,7 +410,7 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
}
}

View File

@@ -4,7 +4,6 @@ import (
"io"
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -17,10 +16,7 @@ func init() {
}
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)

View File

@@ -345,12 +345,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}
func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
}
s := testServiceWithDB(t)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
@@ -371,10 +367,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -391,22 +384,16 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
}
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
}
s := testServiceWithDB(t)
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
@@ -428,11 +415,8 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
}
func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := b.Context()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
@@ -448,10 +432,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := b.Context()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File
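The hunks above replace hand-rolled `&Service{...}` literals with a shared `testServiceWithDB` helper so every test service is built through `NewService`. The helper's body is not part of this compare; the sketch below shows one plausible shape for it, with the option names and construction details treated as assumptions (it also relies on the imports already present in these test files):

```go
// Plausible shape of the testServiceWithDB helper referenced above; this is a
// sketch, not the actual implementation from the repository.
func testServiceWithDB(t testing.TB) *Service {
	beaconDB := testDB.SetupDB(t)
	fc := doublylinkedtree.New()
	s, err := NewService(t.Context(),
		WithDatabase(beaconDB),                   // assumed option name
		WithStateGen(stategen.New(beaconDB, fc)), // assumed option name
		WithForkChoiceStore(fc),                  // assumed option name
	)
	require.NoError(t, err)
	return s
}
```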

@@ -38,7 +38,7 @@ func (s *Service) startupHeadRoot() [32]byte {
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
@@ -46,7 +46,7 @@ func (s *Service) startupHeadRoot() [32]byte {
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
@@ -64,16 +64,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
}
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
if err != nil {
log.WithError(err).Error("could not get head block, starting with finalized block as head")
log.WithError(err).Error("Could not get head block, starting with finalized block as head")
return nil
}
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
return nil
}
chain, err := s.buildForkchoiceChain(s.ctx, blk)
if err != nil {
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
s.cfg.ForkChoiceStore.Lock()

View File

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
})
st, _ := util.DeterministicGenesisState(t, 64)

View File

@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
if err != nil {
logrus.WithError(err).Error("could not update head")
logrus.WithError(err).Error("Could not update head")
}
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
if err != nil {
logrus.WithError(err).Error("could not insert node to forkchoice")
logrus.WithError(err).Error("Could not insert node to forkchoice")
}
}

View File

@@ -3,8 +3,6 @@ package blockchain
import (
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -17,11 +15,8 @@ import (
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
b := util.NewBeaconBlock()
b.Block.Slot = 1792480
util.SaveBlock(t, t.Context(), beaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -68,15 +63,15 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := testServiceWithDB(t)
beaconDB := s.cfg.BeaconDB
util.SaveBlock(t, t.Context(), beaconDB, b)
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,
}
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
s.cfg.WeakSubjectivityCheckpt = tt.checkpt
s.wsVerifier = wv
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
if tt.wantErr == nil {

View File

@@ -178,7 +178,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("cache rotated during async committee update operation - abandoning cache update")
log.Warn("Cache rotated during async committee update operation - abandoning cache update")
return nil
}

View File

@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
valIdx: 2,
st: genState(1),
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 32eth",
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
valIdx: 2,
activeBalance: 1,
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 1",

View File
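The expected error strings here (and in the sync-committee helper tests below) change from "validator index N does not exist" to "index N out of bounds", which lines up with the `consensus_types.ErrOutOfBounds` to `mvslice.ErrOutOfBounds` switches later in this compare: validator reads now always go through the multi-value slice path. A minimal sketch of matching on the sentinel instead of on message text, with an illustrative package and helper name:

```go
package example // illustrative package, not part of the diff

import (
	"errors"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	mvslice "github.com/OffchainLabs/prysm/v6/container/multi-value-slice"
)

// validatorExists reports whether idx is a valid registry index by checking
// the out-of-bounds sentinel rather than the old error message text.
func validatorExists(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex) (bool, error) {
	if _, err := st.ValidatorAtIndexReadOnly(idx); err != nil {
		if errors.Is(err, mvslice.ErrOutOfBounds) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
```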

@@ -119,6 +119,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
require.NoError(t, err)
_, err = Eth1DataHasEnoughSupport(s, eth1data)
_ = err
fuzz.FreeMemory(i)
}
}
@@ -319,6 +320,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
require.NoError(t, err)
err = VerifyDeposit(s, deposit)
_ = err
fuzz.FreeMemory(i)
}
}
@@ -382,5 +384,6 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
_ = err
err = VerifyExitAndSignature(val, s, ve)
_ = err
fuzz.FreeMemory(i)
}
}

View File

@@ -194,11 +194,11 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if IsValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("failed to find source validator index")
log.Error("Failed to find source validator index")
continue
}
if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("failed to switch to compounding validator")
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}
@@ -280,7 +280,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
}
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("failed to fetch pending balance to withdraw")
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if bal > 0 {
@@ -290,7 +290,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
// Initiate the exit of the source validator.
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("failed to compute consolidation epoch")
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch

View File

@@ -100,7 +100,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
require.ErrorContains(t, "validator index 12390192 does not exist", err)
require.ErrorContains(t, "index 12390192 out of bounds", err)
require.Equal(t, false, ok)
}
@@ -187,7 +187,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
require.ErrorContains(t, "validator index 120391029 does not exist", err)
require.ErrorContains(t, "index 120391029 out of bounds", err)
require.Equal(t, false, ok)
}
@@ -287,7 +287,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.ErrorContains(t, "validator index 129301923 does not exist", err)
require.ErrorContains(t, "index 129301923 out of bounds", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
@@ -374,7 +374,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.ErrorContains(t, "validator index 21093019 does not exist", err)
require.ErrorContains(t, "index 21093019 out of bounds", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

View File

@@ -561,17 +561,6 @@ func TestActiveValidatorIndices(t *testing.T) {
},
want: []primitives.ValidatorIndex{0, 2, 3},
},*/
{
name: "impossible_zero_validators", // Regression test for issue #13051
args: args{
state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: make([]*ethpb.Validator, 0),
},
epoch: 10,
},
wantedErr: "state has nil validator slice",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -10,6 +10,7 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -1,20 +1,55 @@
package light_client
import (
"context"
"sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/pkg/errors"
)
var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
type Store struct {
mu sync.RWMutex
beaconDB iface.HeadAccessDatabase
lastFinalityUpdate interfaces.LightClientFinalityUpdate
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
}
func NewLightClientStore() *Store {
return &Store{}
func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
return &Store{
beaconDB: db,
}
}
func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client bootstrap from the database
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
if err != nil {
return nil, err
}
if bootstrap == nil { // not found
return nil, ErrLightClientBootstrapNotFound
}
return bootstrap, nil
}
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, bootstrap interfaces.LightClientBootstrap) error {
s.mu.Lock()
defer s.mu.Unlock()
// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
}
return nil
}
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {

View File
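Taken together, the reworked store now mediates all bootstrap reads and writes and surfaces the missing-bootstrap case as a sentinel error instead of a nil value. A usage sketch, assuming the `lightclient` import alias seen elsewhere in this compare and an illustrative package and helper name:

```go
package example // illustrative package, not part of the diff

import (
	"context"
	"errors"

	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
)

// roundTrip saves a bootstrap for a block root and reads it back, treating the
// sentinel error as "nothing stored" rather than a failure.
func roundTrip(ctx context.Context, db iface.HeadAccessDatabase, root [32]byte, b interfaces.LightClientBootstrap) (interfaces.LightClientBootstrap, error) {
	store := lightclient.NewLightClientStore(db)
	if err := store.SaveLightClientBootstrap(ctx, root, b); err != nil {
		return nil, err
	}
	got, err := store.LightClientBootstrap(ctx, root)
	if errors.Is(err, lightclient.ErrLightClientBootstrapNotFound) {
		return nil, nil // no bootstrap stored for this block root
	}
	return got, err
}
```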

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
// ValidatorIndexMap builds a lookup map for quickly determining the index of
@@ -21,8 +22,12 @@ func ValidatorIndexMap(validators []*ethpb.Validator) map[[fieldparams.BLSPubkey
if record == nil {
continue
}
if record.EffectiveBalance == 0 {
continue
}
key := bytesutil.ToBytes48(record.PublicKey)
m[key] = primitives.ValidatorIndex(idx)
}
log.Info("ValidatorIndexMap built with ", len(m), " entries")
return m
}

View File

@@ -12,7 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/runtime/logging"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
errors "github.com/pkg/errors"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
@@ -122,7 +122,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
}
log.WithFields(lf).WithFields(logging.BlockFieldsFromBlob(sidecars[0])).
Debug("invalid BlobSidecars received")
Debug("Invalid BlobSidecars received")
}
return errors.Wrapf(err, "invalid BlobSidecars received for block %#x", root)
}

View File

@@ -255,7 +255,7 @@ func pruneBefore(before primitives.Epoch, l fsLayout) (map[primitives.Epoch]*pru
}
continue
}
log.WithError(err).Error("encountered unhandled error during pruning")
log.WithError(err).Error("Encountered unhandled error during pruning")
return nil, errors.Wrap(errPruneFailed, err.Error())
}
if ident.epoch >= before {

View File

@@ -564,14 +564,6 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)
versionToForkEpoch := map[int]primitives.Epoch{
version.Altair: params.BeaconConfig().AltairForkEpoch,
version.Bellatrix: params.BeaconConfig().BellatrixForkEpoch,
version.Capella: params.BeaconConfig().CapellaForkEpoch,
version.Deneb: params.BeaconConfig().DenebForkEpoch,
version.Electra: params.BeaconConfig().ElectraForkEpoch,
}
db := setupDB(t)
ctx := t.Context()
@@ -583,7 +575,7 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(versionToForkEpoch[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().VersionToForkEpochMap()[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())

View File
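The hand-written `versionToForkEpoch` map that used to live in this test is replaced by the new `params.BeaconConfig().VersionToForkEpochMap()` helper; the same substitution appears in the light client handler tests later in this compare. A small sketch of the shared slot computation, with an illustrative package and function name:

```go
package example // illustrative package, not part of the diff

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// firstSlotOfFork mirrors the computation used in the tests above: look up the
// fork epoch for a runtime/version constant and convert it to that fork's
// first slot.
func firstSlotOfFork(v int) primitives.Slot {
	forkEpoch := params.BeaconConfig().VersionToForkEpochMap()[v]
	return primitives.Slot(uint64(forkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch))
}
```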

@@ -620,7 +620,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
blobIndex := kzgIndexes[i]
proof, err := blocks.MerkleProofKZGCommitment(blockBody, blobIndex)
if err != nil {
log.WithError(err).WithField("index", blobIndex).Error("failed to get Merkle proof for KZG commitment")
log.WithError(err).WithField("index", blobIndex).Error("Failed to get Merkle proof for KZG commitment")
continue
}
sidecar := &ethpb.BlobSidecar{
@@ -634,14 +634,14 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
if err != nil {
log.WithError(err).WithField("index", blobIndex).Error("failed to create RO blob with root")
log.WithError(err).WithField("index", blobIndex).Error("Failed to create RO blob with root")
continue
}
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
verifiedBlob, err := v.VerifiedROBlob()
if err != nil {
log.WithError(err).WithField("index", blobIndex).Error("failed to verify RO blob")
log.WithError(err).WithField("index", blobIndex).Error("Failed to verify RO blob")
continue
}

View File

@@ -248,14 +248,14 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
if err != nil {
log.WithError(err).Error("unable to generate deposit proof")
log.WithError(err).Error("Unable to generate deposit proof")
}
s.chainStartData.ChainstartDeposits[i].Proof = proof
}
root, err := s.depositTrie.HashTreeRoot()
if err != nil { // This should never happen.
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
log.WithError(err).Error("Unable to determine root of deposit trie, aborting chain start")
return
}
s.chainStartData.Eth1Data = &ethpb.Eth1Data{

View File

@@ -557,8 +557,8 @@ func (s *Service) initPOWService() {
}
}
// Handle edge case with embedded genesis state by fetching genesis header to determine
// its height.
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
// its height, but only if deposit requests have not started yet (pre-Pectra behavior, i.e. before EIP-6110 is active).
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 && !s.depositRequestsStarted {
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent block hash,

View File

@@ -127,7 +127,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
_, remErr := f.store.removeNode(ctx, node)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
log.WithError(remErr).Error("Could not remove node")
}
return errors.Wrap(err, "could not update checkpoints")
}

View File

@@ -235,7 +235,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.finalizedStateAtStartUp = nil
if features.Get().EnableLightClient {
beacon.lcStore = lightclient.NewLightClientStore()
beacon.lcStore = lightclient.NewLightClientStore(beacon.db)
}
return beacon, nil

View File

@@ -78,16 +78,16 @@ func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftO
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("could not aggregate unaggregated attestations")
log.WithError(err).Error("Could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("nil aggregated attestation")
log.Error("Nil aggregated attestation")
continue
}
if aggregated.IsAggregated() {
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
log.WithError(err).Error("Could not save aggregated attestation")
continue
}
} else {

View File

@@ -214,13 +214,13 @@ func defaultAggregateTopicParams(activeValidators uint64) *pubsub.TopicScorePara
aggPerSlot := aggregatorsPerSlot(activeValidators)
firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
meshWeight := -scoreByWeight(aggregateWeight, meshThreshold)
@@ -256,13 +256,13 @@ func defaultSyncContributionTopicParams() *pubsub.TopicScoreParams {
aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
@@ -305,7 +305,7 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco
// Determine the amount of validators expected in a subnet in a single slot.
numPerSlot := time.Duration(subnetWeight / uint64(params.BeaconConfig().SlotsPerEpoch))
if numPerSlot == 0 {
log.Warn("numPerSlot is 0, skipping initializing topic scoring")
log.Warn("Number per slot is 0, skipping initializing topic scoring")
return nil
}
comsPerSlot := committeeCountPerSlot(activeValidators)
@@ -318,20 +318,20 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco
}
rate := numPerSlot * 2 / gossipSubD
if rate == 0 {
log.Warn("rate is 0, skipping initializing topic scoring")
log.Warn("Skipping initializing topic scoring because rate is 0")
return nil
}
// Determine expected first deliveries based on the message rate.
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
// Determine expected mesh deliveries based on message rate applied with a dampening factor.
meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(numPerSlot)/dampeningFactor)
if err != nil {
log.WithError(err).Warn("skipping initializing topic scoring")
log.WithError(err).Warn("Skipping initializing topic scoring")
return nil
}
meshWeight := -scoreByWeight(topicWeight, meshThreshold)
@@ -381,7 +381,7 @@ func defaultSyncSubnetTopicParams(activeValidators uint64) *pubsub.TopicScorePar
rate := subnetWeight * 2 / gossipSubD
if rate == 0 {
log.Warn("rate is 0, skipping initializing topic scoring")
log.Warn("Skipping initializing topic scoring because rate is 0")
return nil
}
// Determine expected first deliveries based on the message rate.

View File

@@ -222,7 +222,7 @@ func (s *Service) Start() {
if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("could not convert ENR to multiaddr")
log.WithError(err).Error("Could not convert ENR to multiaddr")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
@@ -429,7 +429,7 @@ func (s *Service) awaitStateInitialized() {
}
clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Fatal("failed to receive initial genesis data")
log.WithError(err).Fatal("Failed to receive initial genesis data")
}
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()

View File

@@ -88,7 +88,7 @@ func createENR() *enr.Record {
}
db, err := enode.OpenDB("")
if err != nil {
log.Error("could not open node's peer database")
log.Error("Could not open node's peer database")
}
lNode := enode.NewLocalNode(db, key)
return lNode.Node().Record()

View File

@@ -895,7 +895,7 @@ func (s *Service) beaconEndpoints(
template: "/eth/v1/beacon/states/{state_id}/pending_deposits",
name: namespace + ".GetPendingDeposits",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetPendingDeposits,
methods: []string{http.MethodGet},
@@ -904,7 +904,7 @@ func (s *Service) beaconEndpoints(
template: "/eth/v1/beacon/states/{state_id}/pending_consolidations",
name: namespace + ".GetPendingConsolidations",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetPendingConsolidations,
methods: []string{http.MethodGet},
@@ -913,7 +913,7 @@ func (s *Service) beaconEndpoints(
template: "/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals",
name: namespace + ".GetPendingPartialWithdrawals",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetPendingPartialWithdrawals,
methods: []string{http.MethodGet},

View File

@@ -46,11 +46,11 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/multi-value-slice:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -22,8 +22,8 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
mvslice "github.com/OffchainLabs/prysm/v6/container/multi-value-slice"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/httputil"
@@ -501,7 +501,7 @@ func (s *Server) SubmitVoluntaryExit(w http.ResponseWriter, r *http.Request) {
}
val, err := headState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
if errors.Is(err, consensus_types.ErrOutOfBounds) {
if errors.Is(err, mvslice.ErrOutOfBounds) {
httputil.HandleError(w, "Could not get validator: "+err.Error(), http.StatusBadRequest)
return
}

View File

@@ -529,6 +529,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
st := tc.getState()
v := &eth.Validator{ExitEpoch: math.MaxUint64, EffectiveBalance: params.BeaconConfig().MinActivationBalance, WithdrawalCredentials: make([]byte, 32)}
require.NoError(t, st.SetValidators([]*eth.Validator{v}))
require.NoError(t, st.SetBalances([]uint64{0}))
currentSlot := primitives.Slot(0)
// to avoid slot processing
require.NoError(t, st.SetSlot(currentSlot+1))

View File

@@ -25,6 +25,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],
)

View File

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -16,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
)
@@ -33,13 +35,13 @@ func (s *Server) GetLightClientBootstrap(w http.ResponseWriter, req *http.Reques
}
blockRoot := bytesutil.ToBytes32(blockRootParam)
bootstrap, err := s.BeaconDB.LightClientBootstrap(ctx, blockRoot[:])
bootstrap, err := s.LCStore.LightClientBootstrap(ctx, blockRoot)
if err != nil {
httputil.HandleError(w, "Could not get light client bootstrap: "+err.Error(), http.StatusInternalServerError)
return
}
if bootstrap == nil {
httputil.HandleError(w, "Light client bootstrap not found", http.StatusNotFound)
if errors.Is(err, lightclient.ErrLightClientBootstrapNotFound) {
httputil.HandleError(w, "Light client bootstrap not found", http.StatusNotFound)
} else {
httputil.HandleError(w, "Could not get light client bootstrap: "+err.Error(), http.StatusInternalServerError)
}
return
}
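Because this hunk interleaves removed and added lines, the resulting control flow is easier to read assembled from the added lines alone. The sketch below does that; the wrapper function, its name, and its signature are illustrative, and it assumes the handler file's existing imports:

```go
// Reconstruction of the new bootstrap lookup flow from the added lines above;
// the helper itself is not part of the file.
func fetchBootstrap(ctx context.Context, s *Server, w http.ResponseWriter, blockRoot [32]byte) (interfaces.LightClientBootstrap, bool) {
	bootstrap, err := s.LCStore.LightClientBootstrap(ctx, blockRoot)
	if err != nil {
		if errors.Is(err, lightclient.ErrLightClientBootstrapNotFound) {
			httputil.HandleError(w, "Light client bootstrap not found", http.StatusNotFound)
		} else {
			httputil.HandleError(w, "Could not get light client bootstrap: "+err.Error(), http.StatusInternalServerError)
		}
		return nil, false
	}
	return bootstrap, true
}
```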

View File

@@ -42,32 +42,24 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
cfg.FuluForkEpoch = 5
params.OverrideBeaconConfig(cfg)
versionToForkEpoch := map[int]primitives.Epoch{
version.Altair: params.BeaconConfig().AltairForkEpoch,
version.Bellatrix: params.BeaconConfig().BellatrixForkEpoch,
version.Capella: params.BeaconConfig().CapellaForkEpoch,
version.Deneb: params.BeaconConfig().DenebForkEpoch,
version.Electra: params.BeaconConfig().ElectraForkEpoch,
}
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
slot := primitives.Slot(versionToForkEpoch[testVersion] * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
slot := primitives.Slot(params.BeaconConfig().VersionToForkEpochMap()[testVersion] * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
blockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
require.NoError(t, err)
db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(dbtesting.SetupDB(t))
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
err = lcStore.SaveLightClientBootstrap(l.Ctx, blockRoot, bootstrap)
require.NoError(t, err)
s := &Server{
BeaconDB: db,
LCStore: lcStore,
}
request := httptest.NewRequest("GET", "http://foo.com/", nil)
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
@@ -93,23 +85,24 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
require.NotNil(t, resp.Data.CurrentSyncCommittee)
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
})
t.Run(version.String(testVersion)+"SSZ", func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
slot := primitives.Slot(versionToForkEpoch[testVersion] * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
slot := primitives.Slot(params.BeaconConfig().VersionToForkEpochMap()[testVersion] * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
blockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
require.NoError(t, err)
db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(dbtesting.SetupDB(t))
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
err = lcStore.SaveLightClientBootstrap(l.Ctx, blockRoot, bootstrap)
require.NoError(t, err)
s := &Server{
BeaconDB: db,
LCStore: lcStore,
}
request := httptest.NewRequest("GET", "http://foo.com/", nil)
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
@@ -146,10 +139,8 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
}
t.Run("no bootstrap found", func(t *testing.T) {
db := dbtesting.SetupDB(t)
s := &Server{
BeaconDB: db,
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t)),
}
request := httptest.NewRequest("GET", "http://foo.com/", nil)
request.SetPathValue("block_root", hexutil.Encode([]byte{0x00, 0x01, 0x02}))

View File

@@ -33,10 +33,10 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/multi-value-slice:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -26,9 +26,9 @@ import (
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
validator2 "github.com/OffchainLabs/prysm/v6/consensus-types/validator"
mvslice "github.com/OffchainLabs/prysm/v6/container/multi-value-slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/httputil"
@@ -570,7 +570,7 @@ func (s *Server) SubmitBeaconCommitteeSubscription(w http.ResponseWriter, r *htt
subscriptions[i] = consensusItem
val, err := st.ValidatorAtIndexReadOnly(consensusItem.ValidatorIndex)
if err != nil {
if errors.Is(err, consensus_types.ErrOutOfBounds) {
if errors.Is(err, mvslice.ErrOutOfBounds) {
httputil.HandleError(w, "Could not get validator: "+err.Error(), http.StatusBadRequest)
return
}
@@ -819,7 +819,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
feeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
log.WithField("validatorIndex", validatorIndex).Warn("fee recipient is the burn address")
log.WithField("validatorIndex", validatorIndex).Warn("Fee recipient is the burn address")
}
}
val := cache.TrackedValidator{

View File

@@ -136,7 +136,7 @@ func logFailedReorgAttempt(slot primitives.Slot, oldHeadRoot, headRoot [32]byte)
"slot": slot,
"oldHeadRoot": fmt.Sprintf("%#x", oldHeadRoot),
"headRoot": fmt.Sprintf("%#x", headRoot),
}).Warn("late block attempted reorg failed")
}).Warn("Late block attempted reorg failed")
}
func (vs *Server) getHeadNoReorg(ctx context.Context, slot primitives.Slot, parentRoot [32]byte) (state.BeaconState, error) {
@@ -430,7 +430,7 @@ func (vs *Server) PrepareBeaconProposer(
if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
feeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
log.WithField("validatorIndex", r.ValidatorIndex).Warn("fee recipient is the burn address")
log.WithField("validatorIndex", r.ValidatorIndex).Warn("Fee recipient is the burn address")
}
}
val := cache.TrackedValidator{

View File

@@ -132,7 +132,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
if bid.Version() >= version.Deneb {
bidDeneb, ok := bid.(builder.BidDeneb)
if !ok {
log.Warnf("bid type %T does not implement builder.BidDeneb", bid)
log.Warnf("Bid type %T does not implement builder.BidDeneb", bid)
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
} else {
builderKzgCommitments = bidDeneb.BlobKzgCommitments()
@@ -143,7 +143,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
if bid.Version() >= version.Electra {
bidElectra, ok := bid.(builder.BidElectra)
if !ok {
log.Warnf("bid type %T does not implement builder.BidElectra", bid)
log.Warnf("Bid type %T does not implement builder.BidElectra", bid)
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
} else {
executionRequests = bidElectra.ExecutionRequests()

View File

@@ -28,8 +28,14 @@ func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProo
return nil, err
}
body := blk.Block().Body()
// Pre-compute subtrees once before the loop to avoid redundant calculations
proofComponents, err := blocks.PrecomputeMerkleProofComponents(body)
if err != nil {
return nil, err
}
for i := range blobSidecars {
proof, err := blocks.MerkleProofKZGCommitment(body, i)
proof, err := blocks.MerkleProofKZGCommitmentFromComponents(proofComponents, i)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,224 @@
package validator
import (
"errors"
"testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// BuildBlobSidecarsOriginal is the original implementation for comparison
func BuildBlobSidecarsOriginal(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
if blk.Version() < version.Deneb {
return nil, nil // No blobs before deneb.
}
commits, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, err
}
cLen := len(commits)
if cLen != len(blobs) || cLen != len(kzgProofs) {
return nil, errors.New("blob KZG commitments don't match number of blobs or KZG proofs")
}
blobSidecars := make([]*ethpb.BlobSidecar, cLen)
header, err := blk.Header()
if err != nil {
return nil, err
}
body := blk.Block().Body()
for i := range blobSidecars {
proof, err := blocks.MerkleProofKZGCommitment(body, i)
if err != nil {
return nil, err
}
blobSidecars[i] = &ethpb.BlobSidecar{
Index: uint64(i),
Blob: blobs[i],
KzgCommitment: commits[i],
KzgProof: kzgProofs[i],
SignedBlockHeader: header,
CommitmentInclusionProof: proof,
}
}
return blobSidecars, nil
}
func setupBenchmarkData(b *testing.B, numBlobs int) (interfaces.SignedBeaconBlock, [][]byte, [][]byte) {
b.Helper()
// Create KZG commitments
kzgCommitments := make([][]byte, numBlobs)
for i := 0; i < numBlobs; i++ {
kzgCommitments[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
// Create block with KZG commitments
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockDeneb())
if err != nil {
b.Fatal(err)
}
if err := blk.SetBlobKzgCommitments(kzgCommitments); err != nil {
b.Fatal(err)
}
// Create blobs
blobs := make([][]byte, numBlobs)
for i := 0; i < numBlobs; i++ {
blobs[i] = make([]byte, fieldparams.BlobLength)
// Add some variation to the blob data
blobs[i][0] = byte(i)
}
// Create KZG proofs
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
if err != nil {
b.Fatal(err)
}
kzgProofs := make([][]byte, numBlobs)
for i := 0; i < numBlobs; i++ {
kzgProofs[i] = proof
}
return blk, blobs, kzgProofs
}
func BenchmarkBuildBlobSidecars_Original_1Blob(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecarsOriginal(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Optimized_1Blob(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecars(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Original_2Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 2)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecarsOriginal(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Optimized_3Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 3)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecars(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Original_3Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 3)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecarsOriginal(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Optimized_4Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 4)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecars(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Original_9Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 9)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecarsOriginal(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkBuildBlobSidecars_Optimized_9Blobs(b *testing.B) {
blk, blobs, kzgProofs := setupBenchmarkData(b, 9)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := BuildBlobSidecars(blk, blobs, kzgProofs)
if err != nil {
b.Fatal(err)
}
}
}
// Benchmark the individual components to understand where the improvements come from
func BenchmarkMerkleProofKZGCommitment_Original(b *testing.B) {
blk, _, _ := setupBenchmarkData(b, 4)
body := blk.Block().Body()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < 4; j++ {
_, err := blocks.MerkleProofKZGCommitment(body, j)
if err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkMerkleProofKZGCommitment_Optimized(b *testing.B) {
blk, _, _ := setupBenchmarkData(b, 4)
body := blk.Block().Body()
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Pre-compute components once
components, err := blocks.PrecomputeMerkleProofComponents(body)
if err != nil {
b.Fatal(err)
}
// Generate proofs for each index
for j := 0; j < 4; j++ {
_, err := blocks.MerkleProofKZGCommitmentFromComponents(components, j)
if err != nil {
b.Fatal(err)
}
}
}
}

View File

@@ -89,7 +89,7 @@ func (vs *Server) deposits(
}
if !vs.Eth1InfoFetcher.ExecutionClientConnected() {
log.Warn("not connected to eth1 node, skip pending deposit insertion")
log.Warn("Not connected to eth1 node, skip pending deposit insertion")
return []*ethpb.Deposit{}, nil
}
@@ -113,7 +113,7 @@ func (vs *Server) deposits(
// If there are no pending deposits, exit early.
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
if len(allPendingContainers) == 0 {
log.Debug("no pending deposits for inclusion in block")
log.Debug("No pending deposits for inclusion in block")
return []*ethpb.Deposit{}, nil
}

View File

@@ -80,7 +80,7 @@ func (vs *Server) getLocalPayloadFromEngine(
val, tracked := vs.TrackedValidatorsCache.Validator(proposerId)
if !tracked {
logrus.WithFields(logFields).Warn("could not find tracked proposer index")
logrus.WithFields(logFields).Warn("Could not find tracked proposer index")
}
setFeeRecipientIfBurnAddress(&val)

View File

@@ -3271,7 +3271,7 @@ func TestProposer_GetParentHeadState(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte(str), [32]byte(headStr))
require.NotEqual(t, [32]byte(str), [32]byte(genesisStr))
require.LogsContain(t, hook, "late block attempted reorg failed")
require.LogsContain(t, hook, "Late block attempted reorg failed")
})
}

View File

@@ -247,7 +247,7 @@ func GetChunkFromDatabase(
func closeDB(d *slasherkv.Store) {
if err := d.Close(); err != nil {
log.WithError(err).Error("could not close database")
log.WithError(err).Error("Could not close database")
}
}

View File

@@ -31,7 +31,6 @@ go_test(
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
mvslice "github.com/OffchainLabs/prysm/v6/container/multi-value-slice"
@@ -21,32 +20,19 @@ func TestFieldTrie_NewTrie(t *testing.T) {
t.Run("native state", func(t *testing.T) {
runNewTrie(t)
})
t.Run("native state with multivalue slice", func(t *testing.T) {
cfg := &features.Flags{}
cfg.EnableExperimentalState = true
reset := features.InitWithReset(cfg)
runNewTrie(t)
reset()
})
}
func runNewTrie(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 40)
roots := newState.BlockRoots()
var elements interface{}
blockRoots := make([][32]byte, len(roots))
for i, r := range roots {
blockRoots[i] = [32]byte(r)
}
elements = customtypes.BlockRoots(blockRoots)
if features.Get().EnableExperimentalState {
mvRoots := buildTestCompositeSlice[[32]byte](blockRoots)
elements = mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvRoots,
}
mvRoots := buildTestCompositeSlice[[32]byte](blockRoots)
elements := mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvRoots,
}
trie, err := NewFieldTrie(types.BlockRoots, types.BasicArray, elements, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
@@ -69,27 +55,15 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) {
t.Run("native state", func(t *testing.T) {
runRecomputeTrie(t)
})
t.Run("native state with multivalue slice", func(t *testing.T) {
cfg := &features.Flags{}
cfg.EnableExperimentalState = true
reset := features.InitWithReset(cfg)
runRecomputeTrie(t)
reset()
})
}
func runRecomputeTrie(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 32)
var elements interface{}
elements = newState.Validators()
if features.Get().EnableExperimentalState {
mvRoots := buildTestCompositeSlice[*ethpb.Validator](newState.Validators())
elements = mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvRoots,
}
mvRoots := buildTestCompositeSlice[*ethpb.Validator](newState.Validators())
elements := mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvRoots,
}
trie, err := NewFieldTrie(types.Validators, types.CompositeArray, elements, params.BeaconConfig().ValidatorRegistryLimit)
@@ -125,26 +99,14 @@ func TestFieldTrie_RecomputeTrie_CompressedArray(t *testing.T) {
t.Run("native state", func(t *testing.T) {
runRecomputeTrie_CompressedArray(t)
})
t.Run("native state with multivalue slice", func(t *testing.T) {
cfg := &features.Flags{}
cfg.EnableExperimentalState = true
reset := features.InitWithReset(cfg)
runRecomputeTrie_CompressedArray(t)
reset()
})
}
func runRecomputeTrie_CompressedArray(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 32)
var elements interface{}
elements = newState.Balances()
if features.Get().EnableExperimentalState {
mvBals := buildTestCompositeSlice(newState.Balances())
elements = mvslice.MultiValueSliceComposite[uint64]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvBals,
}
mvBals := buildTestCompositeSlice(newState.Balances())
elements := mvslice.MultiValueSliceComposite[uint64]{
Identifiable: mockIdentifier{},
MultiValueSlice: mvBals,
}
trie, err := NewFieldTrie(types.Balances, types.CompressedArray, elements, stateutil.ValidatorLimitForBalancesChunks())

View File

@@ -59,10 +59,8 @@ go_library(
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -135,7 +133,6 @@ go_test(
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -13,9 +13,7 @@ Add the new getter and setter to `/beacon-chain/state/interfaces.go`.
- Update `spec_parameters.go`.
- Update `state_trie.go`:
- Add a `[version]Fields` variable that contains all fields of the new state version.
- Add a `[version]SharedFieldRefCount` constant that represents the number of fields whose references are shared between states.
- Add an `experimentalState[Version]SharedFieldCountRef` constant that represents the number of **non multi-value slice** fields whose references are shared
between states.
- Add a `[version]SharedFieldRefCount` constant that represents the number of fields whose references are shared between states. Multi-value slice references are not shared in this way, so don't count them here (see the sketch after this list).
- Add the following functions: `InitializeFromProto[Version]()`, `InitializeFromProtoUnsafe[Version]()`.
- Update the following functions: `Copy()`, `initializeMerkleLayers()`, `RecordStateMetrics()` (applies only to multi-value slice fields), `rootSelector()`,
`finalizerCleanup()` (applies only to multi-value slice fields).
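For orientation, the two `state_trie.go` additions described above usually take a shape like the sketch below; the Electra names, the listed fields, and the count are placeholders rather than values to copy:

```go
package state_native // sketch only

import "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"

// electraFields would enumerate every field of the new state version.
var electraFields = []types.FieldIndex{
	types.BlockRoots,
	types.Validators,
	types.Balances,
	// ... the remaining fields of the new version ...
}

// electraSharedFieldRefCount counts only fields whose references are shared
// between states; multi-value slice fields are excluded because their sharing
// is handled by the multi-value slice machinery.
const electraSharedFieldRefCount = 5
```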

View File

@@ -8,7 +8,6 @@ import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -24,19 +23,14 @@ type BeaconState struct {
slot primitives.Slot
fork *ethpb.Fork
latestBlockHeader *ethpb.BeaconBlockHeader
blockRoots customtypes.BlockRoots
blockRootsMultiValue *MultiValueBlockRoots
stateRoots customtypes.StateRoots
stateRootsMultiValue *MultiValueStateRoots
historicalRoots customtypes.HistoricalRoots
eth1Data *ethpb.Eth1Data
eth1DataVotes []*ethpb.Eth1Data
eth1DepositIndex uint64
validators []*ethpb.Validator
validatorsMultiValue *MultiValueValidators
balances []uint64
balancesMultiValue *MultiValueBalances
randaoMixes customtypes.RandaoMixes
randaoMixesMultiValue *MultiValueRandaoMixes
slashings []uint64
previousEpochAttestations []*ethpb.PendingAttestation
@@ -47,7 +41,6 @@ type BeaconState struct {
previousJustifiedCheckpoint *ethpb.Checkpoint
currentJustifiedCheckpoint *ethpb.Checkpoint
finalizedCheckpoint *ethpb.Checkpoint
inactivityScores []uint64
inactivityScoresMultiValue *MultiValueInactivityScores
currentSyncCommittee *ethpb.SyncCommittee
nextSyncCommittee *ethpb.SyncCommittee
@@ -130,28 +123,12 @@ type beaconStateMarshalable struct {
}
func (b *BeaconState) MarshalJSON() ([]byte, error) {
var bRoots customtypes.BlockRoots
var sRoots customtypes.StateRoots
var mixes customtypes.RandaoMixes
var balances []uint64
var inactivityScores []uint64
var vals []*ethpb.Validator
if features.Get().EnableExperimentalState {
bRoots = b.blockRootsMultiValue.Value(b)
sRoots = b.stateRootsMultiValue.Value(b)
mixes = b.randaoMixesMultiValue.Value(b)
balances = b.balancesMultiValue.Value(b)
inactivityScores = b.inactivityScoresMultiValue.Value(b)
vals = b.validatorsMultiValue.Value(b)
} else {
bRoots = b.blockRoots
sRoots = b.stateRoots
mixes = b.randaoMixes
balances = b.balances
inactivityScores = b.inactivityScores
vals = b.validators
}
bRoots := b.blockRootsMultiValue.Value(b)
sRoots := b.stateRootsMultiValue.Value(b)
mixes := b.randaoMixesMultiValue.Value(b)
balances := b.balancesMultiValue.Value(b)
inactivityScores := b.inactivityScoresMultiValue.Value(b)
vals := b.validatorsMultiValue.Value(b)
marshalable := &beaconStateMarshalable{
Version: b.version,

View File

@@ -2,10 +2,7 @@ package state_native
import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/config/features"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// LatestBlockHeader stored within the beacon state.
@@ -58,13 +55,10 @@ func (b *BeaconState) BlockRoots() [][]byte {
}
func (b *BeaconState) blockRootsVal() customtypes.BlockRoots {
if features.Get().EnableExperimentalState {
if b.blockRootsMultiValue == nil {
return nil
}
return b.blockRootsMultiValue.Value(b)
if b.blockRootsMultiValue == nil {
return nil
}
return b.blockRoots
return b.blockRootsMultiValue.Value(b)
}
// BlockRootAtIndex retrieves a specific block root based on an
@@ -73,33 +67,12 @@ func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
if b.blockRootsMultiValue == nil {
return []byte{}, nil
}
r, err := b.blockRootsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return r[:], nil
}
if b.blockRoots == nil {
if b.blockRootsMultiValue == nil {
return []byte{}, nil
}
r, err := b.blockRootAtIndex(idx)
r, err := b.blockRootsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return r[:], nil
}
// blockRootAtIndex retrieves a specific block root based on an
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) blockRootAtIndex(idx uint64) ([32]byte, error) {
if uint64(len(b.blockRoots)) <= idx {
return [32]byte{}, errors.Wrapf(consensus_types.ErrOutOfBounds, "block root index %d does not exist", idx)
}
return b.blockRoots[idx], nil
}

View File

@@ -4,7 +4,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/runtime/version"
)
@@ -82,11 +81,7 @@ func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, er
return 0, 0, 0, ErrNilParticipation
}
if features.Get().EnableExperimentalState {
return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValMultiValueSliceReader(b.validatorsMultiValue, b), currentEpoch)
} else {
return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValSliceReader(b.validators), currentEpoch)
}
return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValMultiValueSliceReader(b.validatorsMultiValue, b), currentEpoch)
}
// currentEpochParticipationVal corresponding to participation bits on the beacon chain.

View File

@@ -2,9 +2,6 @@ package state_native
import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/config/features"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/pkg/errors"
)
// RandaoMixes of block proposers on the beacon chain.
@@ -20,13 +17,10 @@ func (b *BeaconState) RandaoMixes() [][]byte {
}
func (b *BeaconState) randaoMixesVal() customtypes.RandaoMixes {
if features.Get().EnableExperimentalState {
if b.randaoMixesMultiValue == nil {
return nil
}
return b.randaoMixesMultiValue.Value(b)
if b.randaoMixesMultiValue == nil {
return nil
}
return b.randaoMixes
return b.randaoMixesMultiValue.Value(b)
}
// RandaoMixAtIndex retrieves a specific block root based on an
@@ -35,36 +29,14 @@ func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
if b.randaoMixesMultiValue == nil {
return nil, nil
}
r, err := b.randaoMixesMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return r[:], nil
}
if b.randaoMixes == nil {
if b.randaoMixesMultiValue == nil {
return nil, nil
}
m, err := b.randaoMixAtIndex(idx)
r, err := b.randaoMixesMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return m[:], nil
}
// randaoMixAtIndex retrieves a specific block root based on an
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) randaoMixAtIndex(idx uint64) ([32]byte, error) {
if uint64(len(b.randaoMixes)) <= idx {
return [32]byte{}, errors.Wrapf(consensus_types.ErrOutOfBounds, "randao mix index %d does not exist", idx)
}
return b.randaoMixes[idx], nil
return r[:], nil
}
// RandaoMixesLength returns the length of the randao mixes slice.
@@ -72,14 +44,8 @@ func (b *BeaconState) RandaoMixesLength() int {
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
if b.randaoMixesMultiValue == nil {
return 0
}
return b.randaoMixesMultiValue.Len(b)
}
if b.randaoMixes == nil {
if b.randaoMixesMultiValue == nil {
return 0
}
return len(b.randaoMixes)
return b.randaoMixesMultiValue.Len(b)
}

View File

@@ -2,8 +2,6 @@ package state_native
import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/config/features"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
@@ -24,20 +22,14 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
var bals []uint64
var inactivityScores []uint64
if features.Get().EnableExperimentalState {
if b.balancesMultiValue != nil {
bals = b.balancesMultiValue.Value(b)
}
if b.inactivityScoresMultiValue != nil {
inactivityScores = b.inactivityScoresMultiValue.Value(b)
}
if b.validatorsMultiValue != nil {
vals = b.validatorsMultiValue.Value(b)
}
} else {
bals = b.balances
inactivityScores = b.inactivityScores
vals = b.validators
if b.balancesMultiValue != nil {
bals = b.balancesMultiValue.Value(b)
}
if b.inactivityScoresMultiValue != nil {
inactivityScores = b.inactivityScoresMultiValue.Value(b)
}
if b.validatorsMultiValue != nil {
vals = b.validatorsMultiValue.Value(b)
}
switch b.version {
@@ -536,13 +528,10 @@ func (b *BeaconState) StateRoots() [][]byte {
}
func (b *BeaconState) stateRootsVal() customtypes.StateRoots {
if features.Get().EnableExperimentalState {
if b.stateRootsMultiValue == nil {
return nil
}
return b.stateRootsMultiValue.Value(b)
if b.stateRootsMultiValue == nil {
return nil
}
return b.stateRoots
return b.stateRootsMultiValue.Value(b)
}
// StateRootAtIndex retrieves a specific state root based on an
@@ -551,39 +540,16 @@ func (b *BeaconState) StateRootAtIndex(idx uint64) ([]byte, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
if b.stateRootsMultiValue == nil {
return nil, nil
}
r, err := b.stateRootsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return r[:], nil
}
if b.stateRoots == nil {
if b.stateRootsMultiValue == nil {
return nil, nil
}
r, err := b.stateRootAtIndex(idx)
r, err := b.stateRootsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
return r[:], nil
}
// stateRootAtIndex retrieves a specific state root based on an
// input index value.
// This assumes that a lock is already held on BeaconState.
//
// WARNING: This function does not work with the multi-value slice feature.
func (b *BeaconState) stateRootAtIndex(idx uint64) ([32]byte, error) {
if uint64(len(b.stateRoots)) <= idx {
return [32]byte{}, errors.Wrapf(consensus_types.ErrOutOfBounds, "state root index %d does not exist", idx)
}
return b.stateRoots[idx], nil
}
// ProtobufBeaconStatePhase0 transforms an input into beacon state in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
func ProtobufBeaconStatePhase0(s interface{}) (*ethpb.BeaconState, error) {

View File

@@ -2,16 +2,12 @@ package state_native
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
)
// Validators participating in consensus on the beacon chain.
@@ -32,17 +28,10 @@ func (b *BeaconState) ValidatorsReadOnly() []state.ReadOnlyValidator {
func (b *BeaconState) validatorsVal() []*ethpb.Validator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return nil
}
v = b.validatorsMultiValue.Value(b)
} else {
if b.validators == nil {
return nil
}
v = b.validators
if b.validatorsMultiValue == nil {
return nil
}
v = b.validatorsMultiValue.Value(b)
res := make([]*ethpb.Validator, len(v))
for i := 0; i < len(res); i++ {
@@ -56,18 +45,10 @@ func (b *BeaconState) validatorsVal() []*ethpb.Validator {
}
func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return nil
}
v = b.validatorsMultiValue.Value(b)
} else {
if b.validators == nil {
return nil
}
v = b.validators
if b.validatorsMultiValue == nil {
return nil
}
v := b.validatorsMultiValue.Value(b)
res := make([]state.ReadOnlyValidator, len(v))
var err error
@@ -84,34 +65,11 @@ func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {
return res
}
// references of validators participating in consensus on the beacon chain.
// This assumes that a lock is already held on BeaconState. This does not
// copy fully and instead just copies the reference.
func (b *BeaconState) validatorsReferences() []*ethpb.Validator {
if b.validators == nil {
return nil
}
res := make([]*ethpb.Validator, len(b.validators), len(b.validators)+int(params.BeaconConfig().MaxDeposits))
for i := 0; i < len(res); i++ {
validator := b.validators[i]
if validator == nil {
continue
}
// copy validator reference instead.
res[i] = validator
}
return res
}
func (b *BeaconState) validatorsLen() int {
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return 0
}
return b.validatorsMultiValue.Len(b)
if b.validatorsMultiValue == nil {
return 0
}
return len(b.validators)
return b.validatorsMultiValue.Len(b)
}
// ValidatorAtIndex is the validator at the provided index.
@@ -123,25 +81,14 @@ func (b *BeaconState) ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Va
}
func (b *BeaconState) validatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error) {
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return &ethpb.Validator{}, nil
}
v, err := b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return nil, err
}
return ethpb.CopyValidator(v), nil
}
if b.validators == nil {
if b.validatorsMultiValue == nil {
return &ethpb.Validator{}, nil
}
if uint64(len(b.validators)) <= uint64(idx) {
return nil, errors.Wrapf(consensus_types.ErrOutOfBounds, "validator index %d does not exist", idx)
v, err := b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return nil, err
}
val := b.validators[idx]
return ethpb.CopyValidator(val), nil
return ethpb.CopyValidator(v), nil
}
// ValidatorAtIndexReadOnly is the validator at the provided index. This method
@@ -154,25 +101,14 @@ func (b *BeaconState) ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (s
}
func (b *BeaconState) validatorAtIndexReadOnly(idx primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return nil, state.ErrNilValidatorsInState
}
v, err := b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return nil, err
}
return NewValidator(v)
}
if b.validators == nil {
if b.validatorsMultiValue == nil {
return nil, state.ErrNilValidatorsInState
}
if uint64(len(b.validators)) <= uint64(idx) {
return nil, errors.Wrapf(consensus_types.ErrOutOfBounds, "validator index %d does not exist", idx)
v, err := b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return nil, err
}
val := b.validators[idx]
return NewValidator(val)
return NewValidator(v)
}
// ValidatorIndexByPubkey returns a given validator by its 48-byte public key.
@@ -181,20 +117,40 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
return 0, false
}
b.lock.RLock()
defer b.lock.RUnlock()
var numOfVals int
if features.Get().EnableExperimentalState {
numOfVals = b.validatorsMultiValue.Len(b)
} else {
numOfVals = len(b.validators)
}
numOfVals := b.validatorsMultiValue.Len(b)
// Try fast lookup via validator map first
idx, ok := b.valMapHandler.Get(key)
if ok && primitives.ValidatorIndex(numOfVals) <= idx {
return primitives.ValidatorIndex(0), false
if ok && idx < primitives.ValidatorIndex(numOfVals) {
b.lock.RUnlock()
return idx, true
}
return idx, ok
if ok {
b.lock.RUnlock()
return 0, false // Found in map but out of bounds
}
// Release lock before calling ReadFromEveryValidator to avoid recursive lock
b.lock.RUnlock()
// Fallback: search through all validators using ReadFromEveryValidator
var foundIdx primitives.ValidatorIndex
var found bool
err := b.ReadFromEveryValidator(func(i int, val state.ReadOnlyValidator) error {
if val.PublicKey() == key {
foundIdx = primitives.ValidatorIndex(i)
found = true
return ErrNilParticipation // Return error to break early
}
return nil
})
if err != nil && err != ErrNilParticipation {
return 0, false
}
return foundIdx, found
}
// PubkeyAtIndex returns the pubkey at the given
@@ -203,18 +159,9 @@ func (b *BeaconState) PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.
b.lock.RLock()
defer b.lock.RUnlock()
var v *ethpb.Validator
if features.Get().EnableExperimentalState {
var err error
v, err = b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return [fieldparams.BLSPubkeyLength]byte{}
}
} else {
if uint64(idx) >= uint64(len(b.validators)) {
return [fieldparams.BLSPubkeyLength]byte{}
}
v = b.validators[idx]
v, err := b.validatorsMultiValue.At(b, uint64(idx))
if err != nil {
return [fieldparams.BLSPubkeyLength]byte{}
}
if v == nil {
@@ -231,20 +178,10 @@ func (b *BeaconState) AggregateKeyFromIndices(idxs []uint64) (bls.PublicKey, err
pubKeys := make([][]byte, len(idxs))
for i, idx := range idxs {
var v *ethpb.Validator
if features.Get().EnableExperimentalState {
var err error
v, err = b.validatorsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
} else {
if idx >= uint64(len(b.validators)) {
return nil, consensus_types.ErrOutOfBounds
}
v = b.validators[idx]
v, err := b.validatorsMultiValue.At(b, idx)
if err != nil {
return nil, err
}
if v == nil {
return nil, ErrNilWrappedValidator
}
@@ -261,15 +198,11 @@ func (b *BeaconState) PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error)
l := b.validatorsLen()
res := make([][fieldparams.BLSPubkeyLength]byte, l)
for i := 0; i < l; i++ {
if features.Get().EnableExperimentalState {
val, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return nil, err
}
copy(res[i][:], val.PublicKey)
} else {
copy(res[i][:], b.validators[i].PublicKey)
val, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return nil, err
}
copy(res[i][:], val.PublicKey)
}
return res, nil
}
@@ -289,30 +222,6 @@ func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyV
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
return b.readFromEveryValidatorMVSlice(f)
}
if b.validators == nil {
return state.ErrNilValidatorsInState
}
validators := b.validators
for i, v := range validators {
v, err := NewValidator(v)
if err != nil {
return err
}
if err = f(i, v); err != nil {
return err
}
}
return nil
}
// WARNING: This function works only for the multi-value slice feature.
func (b *BeaconState) readFromEveryValidatorMVSlice(f func(idx int, val state.ReadOnlyValidator) error) error {
if b.validatorsMultiValue == nil {
return state.ErrNilValidatorsInState
}
@@ -342,18 +251,10 @@ func (b *BeaconState) Balances() []uint64 {
}
func (b *BeaconState) balancesVal() []uint64 {
if features.Get().EnableExperimentalState {
if b.balancesMultiValue == nil {
return nil
}
return b.balancesMultiValue.Value(b)
}
if b.balances == nil {
if b.balancesMultiValue == nil {
return nil
}
res := make([]uint64, len(b.balances))
copy(res, b.balances)
return res
return b.balancesMultiValue.Value(b)
}
// BalanceAtIndex of validator with the provided index.
@@ -365,19 +266,10 @@ func (b *BeaconState) BalanceAtIndex(idx primitives.ValidatorIndex) (uint64, err
}
func (b *BeaconState) balanceAtIndex(idx primitives.ValidatorIndex) (uint64, error) {
if features.Get().EnableExperimentalState {
if b.balancesMultiValue == nil {
return 0, nil
}
return b.balancesMultiValue.At(b, uint64(idx))
}
if b.balances == nil {
if b.balancesMultiValue == nil {
return 0, nil
}
if uint64(len(b.balances)) <= uint64(idx) {
return 0, errors.Wrapf(consensus_types.ErrOutOfBounds, "balance index %d does not exist", idx)
}
return b.balances[idx], nil
return b.balancesMultiValue.At(b, uint64(idx))
}
// BalancesLength returns the length of the balances slice.
@@ -385,13 +277,10 @@ func (b *BeaconState) BalancesLength() int {
b.lock.RLock()
defer b.lock.RUnlock()
if features.Get().EnableExperimentalState {
if b.balancesMultiValue == nil {
return 0
}
return b.balancesMultiValue.Len(b)
if b.balancesMultiValue == nil {
return 0
}
return len(b.balances)
return b.balancesMultiValue.Len(b)
}
// Slashings of validators on the beacon chain.
@@ -431,18 +320,10 @@ func (b *BeaconState) InactivityScores() ([]uint64, error) {
}
func (b *BeaconState) inactivityScoresVal() []uint64 {
if features.Get().EnableExperimentalState {
if b.inactivityScoresMultiValue == nil {
return nil
}
return b.inactivityScoresMultiValue.Value(b)
}
if b.inactivityScores == nil {
if b.inactivityScoresMultiValue == nil {
return nil
}
res := make([]uint64, len(b.inactivityScores))
copy(res, b.inactivityScores)
return res
return b.inactivityScoresMultiValue.Value(b)
}
// PendingBalanceToWithdraw returns the sum of all pending withdrawals for the given validator.

View File
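
The new ValidatorIndexByPubkey above checks the pubkey-to-index cache first, treats a cached index that is out of range for this state as a miss, and only falls back to scanning every validator when the cache has no entry (releasing the read lock before ReadFromEveryValidator re-acquires it). A rough sketch of that lookup shape with hypothetical simplified types and no locking:

package main

import "fmt"

type validator struct{ pubkey [48]byte }

type registry struct {
	vals  []validator
	byKey map[[48]byte]uint64 // cached pubkey -> index; may be stale
}

// indexByPubkey prefers the cache, validates the cached index against the
// current validator count, and otherwise falls back to a linear scan.
func (r *registry) indexByPubkey(key [48]byte) (uint64, bool) {
	if idx, ok := r.byKey[key]; ok {
		if idx < uint64(len(r.vals)) {
			return idx, true
		}
		return 0, false // cached entry points past this state's registry
	}
	for i := range r.vals { // cache miss: full scan
		if r.vals[i].pubkey == key {
			return uint64(i), true
		}
	}
	return 0, false
}

func main() {
	k := [48]byte{0x01}
	r := &registry{
		vals:  []validator{{pubkey: k}},
		byKey: map[[48]byte]uint64{}, // empty cache forces the fallback path
	}
	idx, ok := r.indexByPubkey(k)
	fmt.Println(idx, ok) // 0 true
}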

@@ -3,18 +3,12 @@ package state_native
import (
"testing"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func FuzzMultiValueBalances(f *testing.F) {
resetFn := features.InitWithReset(&features.Flags{
EnableExperimentalState: true,
})
defer resetFn()
bals := make([]uint64, 65536)
firstState, err := InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: bals})
require.NoError(f, err)

View File

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -17,663 +16,358 @@ import (
)
func TestStateReferenceSharing_Finalizer_Phase0(t *testing.T) {
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
// This test showcases the logic on the Slashings field with the GC finalizer.
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{RandaoMixes: [][]byte{[]byte("foo")}})
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{Slashings: []uint64{10, 30, 40}})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected a single reference for Slashings")
func() {
// Create object in a different scope for GC
b := a.Copy()
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
assert.Equal(t, uint(2), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 references to Slashings")
_ = b
}()
runtime.GC() // Should run finalizer on object b
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 1 shared reference to Slashings!")
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, bytesutil.ToBytes32([]byte("bar"))))
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
assert.Equal(t, uint(2), b.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 shared references to Slashings")
require.NoError(t, b.UpdateSlashingsAtIndex(0, 100))
if b.sharedFieldReferences[types.Slashings].Refs() != 1 || a.sharedFieldReferences[types.Slashings].Refs() != 1 {
t.Error("Expected 1 shared reference to Slashings for both a and b")
}
}
func TestStateReferenceSharing_Finalizer_Altair(t *testing.T) {
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
// This test showcases the logic on the Slashings field with the GC finalizer.
s, err := InitializeFromProtoUnsafeAltair(&ethpb.BeaconStateAltair{RandaoMixes: [][]byte{[]byte("foo")}})
s, err := InitializeFromProtoUnsafeAltair(&ethpb.BeaconStateAltair{Slashings: []uint64{10, 30, 40}})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected a single reference for Slashings")
func() {
// Create object in a different scope for GC
b := a.Copy()
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
assert.Equal(t, uint(2), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 references to Slashings")
_ = b
}()
runtime.GC() // Should run finalizer on object b
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 1 shared reference to Slashings!")
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, bytesutil.ToBytes32([]byte("bar"))))
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
assert.Equal(t, uint(2), b.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 shared references to Slashings")
require.NoError(t, b.UpdateSlashingsAtIndex(0, 100))
if b.sharedFieldReferences[types.Slashings].Refs() != 1 || a.sharedFieldReferences[types.Slashings].Refs() != 1 {
t.Error("Expected 1 shared reference to Slashings for both a and b")
}
}
func TestStateReferenceSharing_Finalizer_Bellatrix(t *testing.T) {
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
// This test showcases the logic on the Slashings field with the GC finalizer.
s, err := InitializeFromProtoUnsafeBellatrix(&ethpb.BeaconStateBellatrix{RandaoMixes: [][]byte{[]byte("foo")}})
s, err := InitializeFromProtoUnsafeBellatrix(&ethpb.BeaconStateBellatrix{Slashings: []uint64{10, 30, 40}})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected a single reference for Slashings")
func() {
// Create object in a different scope for GC
b := a.Copy()
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
assert.Equal(t, uint(2), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 references to Slashings")
_ = b
}()
runtime.GC() // Should run finalizer on object b
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 1 shared reference to Slashings!")
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, bytesutil.ToBytes32([]byte("bar"))))
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
assert.Equal(t, uint(2), b.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 shared references to Slashings")
require.NoError(t, b.UpdateSlashingsAtIndex(0, 100))
if b.sharedFieldReferences[types.Slashings].Refs() != 1 || a.sharedFieldReferences[types.Slashings].Refs() != 1 {
t.Error("Expected 1 shared reference to Slashings for both a and b")
}
}
func TestStateReferenceSharing_Finalizer_Capella(t *testing.T) {
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
// This test showcases the logic on the Slashings field with the GC finalizer.
s, err := InitializeFromProtoUnsafeCapella(&ethpb.BeaconStateCapella{RandaoMixes: [][]byte{[]byte("foo")}})
s, err := InitializeFromProtoUnsafeCapella(&ethpb.BeaconStateCapella{Slashings: []uint64{10, 30, 40}})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected a single reference for Slashings")
func() {
// Create object in a different scope for GC
b := a.Copy()
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
assert.Equal(t, uint(2), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 references to Slashings")
_ = b
}()
runtime.GC() // Should run finalizer on object b
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 1 shared reference to Slashings!")
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, bytesutil.ToBytes32([]byte("bar"))))
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
assert.Equal(t, uint(2), b.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 shared references to Slashings")
require.NoError(t, b.UpdateSlashingsAtIndex(0, 100))
if b.sharedFieldReferences[types.Slashings].Refs() != 1 || a.sharedFieldReferences[types.Slashings].Refs() != 1 {
t.Error("Expected 1 shared reference to Slashings for both a and b")
}
}
func TestStateReferenceSharing_Finalizer_Deneb(t *testing.T) {
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
// This test showcases the logic on the Slashings field with the GC finalizer.
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{RandaoMixes: [][]byte{[]byte("foo")}})
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{Slashings: []uint64{10, 30, 40}})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected a single reference for Slashings")
func() {
// Create object in a different scope for GC
b := a.Copy()
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
assert.Equal(t, uint(2), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 references to Slashings")
_ = b
}()
runtime.GC() // Should run finalizer on object b
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
assert.Equal(t, uint(1), a.sharedFieldReferences[types.Slashings].Refs(), "Expected 1 shared reference to Slashings!")
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, bytesutil.ToBytes32([]byte("bar"))))
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
assert.Equal(t, uint(2), b.sharedFieldReferences[types.Slashings].Refs(), "Expected 2 shared references to Slashings")
require.NoError(t, b.UpdateSlashingsAtIndex(0, 100))
if b.sharedFieldReferences[types.Slashings].Refs() != 1 || a.sharedFieldReferences[types.Slashings].Refs() != 1 {
t.Error("Expected 1 shared reference to Slashings for both a and b")
}
}
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Phase0(t *testing.T) {
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
func TestStateReferenceCopy_NoUnexpectedSlashingsMutation_Phase0(t *testing.T) {
val1, val2 := uint64(10), uint64(20)
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{
BlockRoots: [][]byte{
root1[:],
},
StateRoots: [][]byte{
root1[:],
},
Slashings: []uint64{val1},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.BlockRoots, 2)
assertRefCount(t, a, types.StateRoots, 2)
assertRefCount(t, b, types.BlockRoots, 2)
assertRefCount(t, b, types.StateRoots, 2)
assertRefCount(t, a, types.Slashings, 2)
assertRefCount(t, b, types.Slashings, 2)
// Assert shared state.
blockRootsA := a.BlockRoots()
stateRootsA := a.StateRoots()
blockRootsB := b.BlockRoots()
stateRootsB := b.StateRoots()
assertValFound(t, blockRootsA, root1[:])
assertValFound(t, blockRootsB, root1[:])
assertValFound(t, stateRootsA, root1[:])
assertValFound(t, stateRootsB, root1[:])
slashingsA := a.Slashings()
slashingsB := b.Slashings()
assertValFound(t, slashingsA, val1)
assertValFound(t, slashingsB, val1)
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
require.NoError(t, a.UpdateSlashingsAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValNotFound(t, a.BlockRoots(), root1[:])
assertValNotFound(t, a.StateRoots(), root1[:])
assertValFound(t, a.BlockRoots(), root2[:])
assertValFound(t, a.StateRoots(), root2[:])
assertValFound(t, b.BlockRoots(), root1[:])
assertValFound(t, b.StateRoots(), root1[:])
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
assertValFound(t, a.Slashings(), val2)
assertValNotFound(t, a.Slashings(), val1)
assertValFound(t, b.Slashings(), val1)
assertValNotFound(t, b.Slashings(), val2)
assertValFound(t, slashingsB, val1)
assertValNotFound(t, slashingsB, val2)
assert.DeepEqual(t, val2, a.Slashings()[0], "Expected mutation not found")
assert.DeepEqual(t, val1, slashingsB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, b, types.BlockRoots, 1)
assertRefCount(t, b, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
assertRefCount(t, b, types.Slashings, 1)
}
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Altair(t *testing.T) {
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
func TestStateReferenceCopy_NoUnexpectedSlashingMutation_Altair(t *testing.T) {
val1, val2 := uint64(10), uint64(20)
s, err := InitializeFromProtoUnsafeAltair(&ethpb.BeaconStateAltair{
BlockRoots: [][]byte{
root1[:],
},
StateRoots: [][]byte{
root1[:],
},
Slashings: []uint64{val1},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.BlockRoots, 2)
assertRefCount(t, a, types.StateRoots, 2)
assertRefCount(t, b, types.BlockRoots, 2)
assertRefCount(t, b, types.StateRoots, 2)
assertRefCount(t, a, types.Slashings, 2)
assertRefCount(t, b, types.Slashings, 2)
// Assert shared state.
blockRootsA := a.BlockRoots()
stateRootsA := a.StateRoots()
blockRootsB := b.BlockRoots()
stateRootsB := b.StateRoots()
assertValFound(t, blockRootsA, root1[:])
assertValFound(t, blockRootsB, root1[:])
assertValFound(t, stateRootsA, root1[:])
assertValFound(t, stateRootsB, root1[:])
slashingsA := a.Slashings()
slashingsB := b.Slashings()
assertValFound(t, slashingsA, val1)
assertValFound(t, slashingsB, val1)
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
require.NoError(t, a.UpdateSlashingsAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValNotFound(t, a.BlockRoots(), root1[:])
assertValNotFound(t, a.StateRoots(), root1[:])
assertValFound(t, a.BlockRoots(), root2[:])
assertValFound(t, a.StateRoots(), root2[:])
assertValFound(t, b.BlockRoots(), root1[:])
assertValFound(t, b.StateRoots(), root1[:])
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
assertValFound(t, a.Slashings(), val2)
assertValNotFound(t, a.Slashings(), val1)
assertValFound(t, b.Slashings(), val1)
assertValNotFound(t, b.Slashings(), val2)
assertValFound(t, slashingsB, val1)
assertValNotFound(t, slashingsB, val2)
assert.DeepEqual(t, val2, a.Slashings()[0], "Expected mutation not found")
assert.DeepEqual(t, val1, slashingsB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, b, types.BlockRoots, 1)
assertRefCount(t, b, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
assertRefCount(t, b, types.Slashings, 1)
}
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Bellatrix(t *testing.T) {
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
func TestStateReferenceCopy_NoUnexpectedSlashingMutation_Bellatrix(t *testing.T) {
val1, val2 := uint64(10), uint64(20)
s, err := InitializeFromProtoUnsafeBellatrix(&ethpb.BeaconStateBellatrix{
BlockRoots: [][]byte{
root1[:],
},
StateRoots: [][]byte{
root1[:],
},
Slashings: []uint64{val1},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.BlockRoots, 2)
assertRefCount(t, a, types.StateRoots, 2)
assertRefCount(t, b, types.BlockRoots, 2)
assertRefCount(t, b, types.StateRoots, 2)
assertRefCount(t, a, types.Slashings, 2)
assertRefCount(t, b, types.Slashings, 2)
// Assert shared state.
blockRootsA := a.BlockRoots()
stateRootsA := a.StateRoots()
blockRootsB := b.BlockRoots()
stateRootsB := b.StateRoots()
assertValFound(t, blockRootsA, root1[:])
assertValFound(t, blockRootsB, root1[:])
assertValFound(t, stateRootsA, root1[:])
assertValFound(t, stateRootsB, root1[:])
slashingsA := a.Slashings()
slashingsB := b.Slashings()
assertValFound(t, slashingsA, val1)
assertValFound(t, slashingsB, val1)
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
require.NoError(t, a.UpdateSlashingsAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValNotFound(t, a.BlockRoots(), root1[:])
assertValNotFound(t, a.StateRoots(), root1[:])
assertValFound(t, a.BlockRoots(), root2[:])
assertValFound(t, a.StateRoots(), root2[:])
assertValFound(t, b.BlockRoots(), root1[:])
assertValFound(t, b.StateRoots(), root1[:])
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
assertValFound(t, a.Slashings(), val2)
assertValNotFound(t, a.Slashings(), val1)
assertValFound(t, b.Slashings(), val1)
assertValNotFound(t, b.Slashings(), val2)
assertValFound(t, slashingsB, val1)
assertValNotFound(t, slashingsB, val2)
assert.DeepEqual(t, val2, a.Slashings()[0], "Expected mutation not found")
assert.DeepEqual(t, val1, slashingsB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, b, types.BlockRoots, 1)
assertRefCount(t, b, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
assertRefCount(t, b, types.Slashings, 1)
}
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Capella(t *testing.T) {
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
func TestStateReferenceCopy_NoUnexpectedSlashingMutation_Capella(t *testing.T) {
val1, val2 := uint64(10), uint64(20)
s, err := InitializeFromProtoUnsafeCapella(&ethpb.BeaconStateCapella{
BlockRoots: [][]byte{
root1[:],
},
StateRoots: [][]byte{
root1[:],
},
Slashings: []uint64{val1},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.BlockRoots, 2)
assertRefCount(t, a, types.StateRoots, 2)
assertRefCount(t, b, types.BlockRoots, 2)
assertRefCount(t, b, types.StateRoots, 2)
assertRefCount(t, a, types.Slashings, 2)
assertRefCount(t, b, types.Slashings, 2)
// Assert shared state.
blockRootsA := a.BlockRoots()
stateRootsA := a.StateRoots()
blockRootsB := b.BlockRoots()
stateRootsB := b.StateRoots()
assertValFound(t, blockRootsA, root1[:])
assertValFound(t, blockRootsB, root1[:])
assertValFound(t, stateRootsA, root1[:])
assertValFound(t, stateRootsB, root1[:])
slashingsA := a.Slashings()
slashingsB := b.Slashings()
assertValFound(t, slashingsA, val1)
assertValFound(t, slashingsB, val1)
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
require.NoError(t, a.UpdateSlashingsAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValNotFound(t, a.BlockRoots(), root1[:])
assertValNotFound(t, a.StateRoots(), root1[:])
assertValFound(t, a.BlockRoots(), root2[:])
assertValFound(t, a.StateRoots(), root2[:])
assertValFound(t, b.BlockRoots(), root1[:])
assertValFound(t, b.StateRoots(), root1[:])
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
assertValFound(t, a.Slashings(), val2)
assertValNotFound(t, a.Slashings(), val1)
assertValFound(t, b.Slashings(), val1)
assertValNotFound(t, b.Slashings(), val2)
assertValFound(t, slashingsB, val1)
assertValNotFound(t, slashingsB, val2)
assert.DeepEqual(t, val2, a.Slashings()[0], "Expected mutation not found")
assert.DeepEqual(t, val1, slashingsB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, b, types.BlockRoots, 1)
assertRefCount(t, b, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
assertRefCount(t, b, types.Slashings, 1)
}
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Deneb(t *testing.T) {
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
func TestStateReferenceCopy_NoUnexpectedSlashingMutation_Deneb(t *testing.T) {
val1, val2 := uint64(10), uint64(20)
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
BlockRoots: [][]byte{
root1[:],
},
StateRoots: [][]byte{
root1[:],
},
Slashings: []uint64{val1},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, a, types.Slashings, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.BlockRoots, 2)
assertRefCount(t, a, types.StateRoots, 2)
assertRefCount(t, b, types.BlockRoots, 2)
assertRefCount(t, b, types.StateRoots, 2)
assertRefCount(t, a, types.Slashings, 2)
assertRefCount(t, b, types.Slashings, 2)
// Assert shared state.
blockRootsA := a.BlockRoots()
stateRootsA := a.StateRoots()
blockRootsB := b.BlockRoots()
stateRootsB := b.StateRoots()
assertValFound(t, blockRootsA, root1[:])
assertValFound(t, blockRootsB, root1[:])
assertValFound(t, stateRootsA, root1[:])
assertValFound(t, stateRootsB, root1[:])
slashingsA := a.Slashings()
slashingsB := b.Slashings()
assertValFound(t, slashingsA, val1)
assertValFound(t, slashingsB, val1)
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
require.NoError(t, a.UpdateSlashingsAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValNotFound(t, a.BlockRoots(), root1[:])
assertValNotFound(t, a.StateRoots(), root1[:])
assertValFound(t, a.BlockRoots(), root2[:])
assertValFound(t, a.StateRoots(), root2[:])
assertValFound(t, b.BlockRoots(), root1[:])
assertValFound(t, b.StateRoots(), root1[:])
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
assertValFound(t, a.Slashings(), val2)
assertValNotFound(t, a.Slashings(), val1)
assertValFound(t, b.Slashings(), val1)
assertValNotFound(t, b.Slashings(), val2)
assertValFound(t, slashingsB, val1)
assertValNotFound(t, slashingsB, val2)
assert.DeepEqual(t, val2, a.Slashings()[0], "Expected mutation not found")
assert.DeepEqual(t, val1, slashingsB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.BlockRoots, 1)
assertRefCount(t, a, types.StateRoots, 1)
assertRefCount(t, b, types.BlockRoots, 1)
assertRefCount(t, b, types.StateRoots, 1)
}
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Phase0(t *testing.T) {
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{
RandaoMixes: [][]byte{
val1[:],
},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.RandaoMixes, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.RandaoMixes, 2)
assertRefCount(t, b, types.RandaoMixes, 2)
// Assert shared state.
mixesA := a.RandaoMixes()
mixesB := b.RandaoMixes()
assertValFound(t, mixesA, val1[:])
assertValFound(t, mixesB, val1[:])
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValFound(t, a.RandaoMixes(), val2[:])
assertValNotFound(t, a.RandaoMixes(), val1[:])
assertValFound(t, b.RandaoMixes(), val1[:])
assertValNotFound(t, b.RandaoMixes(), val2[:])
assertValFound(t, mixesB, val1[:])
assertValNotFound(t, mixesB, val2[:])
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.RandaoMixes, 1)
assertRefCount(t, b, types.RandaoMixes, 1)
}
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Altair(t *testing.T) {
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
s, err := InitializeFromProtoUnsafeAltair(&ethpb.BeaconStateAltair{
RandaoMixes: [][]byte{
val1[:],
},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.RandaoMixes, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.RandaoMixes, 2)
assertRefCount(t, b, types.RandaoMixes, 2)
// Assert shared state.
mixesA := a.RandaoMixes()
mixesB := b.RandaoMixes()
assertValFound(t, mixesA, val1[:])
assertValFound(t, mixesB, val1[:])
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValFound(t, a.RandaoMixes(), val2[:])
assertValNotFound(t, a.RandaoMixes(), val1[:])
assertValFound(t, b.RandaoMixes(), val1[:])
assertValNotFound(t, b.RandaoMixes(), val2[:])
assertValFound(t, mixesB, val1[:])
assertValNotFound(t, mixesB, val2[:])
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.RandaoMixes, 1)
assertRefCount(t, b, types.RandaoMixes, 1)
}
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Bellatrix(t *testing.T) {
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
s, err := InitializeFromProtoUnsafeBellatrix(&ethpb.BeaconStateBellatrix{
RandaoMixes: [][]byte{
val1[:],
},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.RandaoMixes, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.RandaoMixes, 2)
assertRefCount(t, b, types.RandaoMixes, 2)
// Assert shared state.
mixesA := a.RandaoMixes()
mixesB := b.RandaoMixes()
assertValFound(t, mixesA, val1[:])
assertValFound(t, mixesB, val1[:])
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValFound(t, a.RandaoMixes(), val2[:])
assertValNotFound(t, a.RandaoMixes(), val1[:])
assertValFound(t, b.RandaoMixes(), val1[:])
assertValNotFound(t, b.RandaoMixes(), val2[:])
assertValFound(t, mixesB, val1[:])
assertValNotFound(t, mixesB, val2[:])
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.RandaoMixes, 1)
assertRefCount(t, b, types.RandaoMixes, 1)
}
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Capella(t *testing.T) {
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
s, err := InitializeFromProtoUnsafeCapella(&ethpb.BeaconStateCapella{
RandaoMixes: [][]byte{
val1[:],
},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.RandaoMixes, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.RandaoMixes, 2)
assertRefCount(t, b, types.RandaoMixes, 2)
// Assert shared state.
mixesA := a.RandaoMixes()
mixesB := b.RandaoMixes()
assertValFound(t, mixesA, val1[:])
assertValFound(t, mixesB, val1[:])
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValFound(t, a.RandaoMixes(), val2[:])
assertValNotFound(t, a.RandaoMixes(), val1[:])
assertValFound(t, b.RandaoMixes(), val1[:])
assertValNotFound(t, b.RandaoMixes(), val2[:])
assertValFound(t, mixesB, val1[:])
assertValNotFound(t, mixesB, val2[:])
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.RandaoMixes, 1)
assertRefCount(t, b, types.RandaoMixes, 1)
}
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Deneb(t *testing.T) {
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
RandaoMixes: [][]byte{
val1[:],
},
})
require.NoError(t, err)
a, ok := s.(*BeaconState)
require.Equal(t, true, ok)
require.NoError(t, err)
assertRefCount(t, a, types.RandaoMixes, 1)
// Copy, increases reference count.
copied := a.Copy()
b, ok := copied.(*BeaconState)
require.Equal(t, true, ok)
assertRefCount(t, a, types.RandaoMixes, 2)
assertRefCount(t, b, types.RandaoMixes, 2)
// Assert shared state.
mixesA := a.RandaoMixes()
mixesB := b.RandaoMixes()
assertValFound(t, mixesA, val1[:])
assertValFound(t, mixesB, val1[:])
// Mutator should only affect calling state: a.
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2))
// Assert no shared state mutation occurred only on state a (copy on write).
assertValFound(t, a.RandaoMixes(), val2[:])
assertValNotFound(t, a.RandaoMixes(), val1[:])
assertValFound(t, b.RandaoMixes(), val1[:])
assertValNotFound(t, b.RandaoMixes(), val2[:])
assertValFound(t, mixesB, val1[:])
assertValNotFound(t, mixesB, val2[:])
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
// Copy on write happened, reference counters are reset.
assertRefCount(t, a, types.RandaoMixes, 1)
assertRefCount(t, b, types.RandaoMixes, 1)
assertRefCount(t, a, types.Slashings, 1)
assertRefCount(t, b, types.Slashings, 1)
}
func TestStateReferenceCopy_NoUnexpectedAttestationsMutation(t *testing.T) {
@@ -1017,10 +711,6 @@ func TestValidatorReferences_RemainsConsistent_Bellatrix(t *testing.T) {
}
func TestValidatorReferences_ApplyValidator_BalancesRead(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableExperimentalState: true,
})
defer resetCfg()
s, err := InitializeFromProtoUnsafeAltair(&ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{
{PublicKey: []byte{'A'}},
@@ -1061,7 +751,7 @@ func assertRefCount(t *testing.T, b *BeaconState, idx types.FieldIndex, want uin
}
// assertValFound checks whether item with a given value exists in list.
func assertValFound(t *testing.T, vals [][]byte, val []byte) {
func assertValFound(t *testing.T, vals []uint64, val uint64) {
for i := range vals {
if reflect.DeepEqual(vals[i], val) {
return
@@ -1072,7 +762,7 @@ func assertValFound(t *testing.T, vals [][]byte, val []byte) {
}
// assertValNotFound checks whether item with a given value doesn't exist in list.
func assertValNotFound(t *testing.T, vals [][]byte, val []byte) {
func assertValNotFound(t *testing.T, vals []uint64, val uint64) {
for i := range vals {
if reflect.DeepEqual(vals[i], val) {
t.Log(string(debug.Stack()))

View File
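
The rewritten tests above all exercise the same copy-on-write contract on the Slashings field: Copy bumps a shared reference counter, the first write on one state clones the slice and resets both counters to 1, and the sibling keeps the old values. A compact sketch of that contract with hypothetical types; Prysm's real states use stateutil reference counters keyed by field index, and this only illustrates the behaviour the tests assert:

package main

import "fmt"

type ref struct{ n uint }

type st struct {
	slashings []uint64
	refs      *ref // shared with copies until a write happens
}

// Copy shares the slice and increments the shared reference count.
func (s *st) Copy() *st {
	s.refs.n++
	return &st{slashings: s.slashings, refs: s.refs}
}

// UpdateSlashingAt is copy-on-write: if the slice is shared, clone it and
// move this state onto a fresh counter before mutating.
func (s *st) UpdateSlashingAt(i int, v uint64) {
	if s.refs.n > 1 {
		cloned := make([]uint64, len(s.slashings))
		copy(cloned, s.slashings)
		s.slashings = cloned
		s.refs.n--          // old counter now covers only the other holders
		s.refs = &ref{n: 1} // this state owns its private copy
	}
	s.slashings[i] = v
}

func main() {
	a := &st{slashings: []uint64{10}, refs: &ref{n: 1}}
	b := a.Copy()
	fmt.Println(a.refs.n, b.refs.n) // 2 2: shared after Copy
	a.UpdateSlashingAt(0, 100)
	fmt.Println(a.slashings[0], b.slashings[0]) // 100 10: b unaffected
	fmt.Println(a.refs.n, b.refs.n)             // 1 1: counters reset
}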

@@ -2,10 +2,6 @@ package state_native
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -26,21 +22,10 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if b.blockRootsMultiValue != nil {
b.blockRootsMultiValue.Detach(b)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(val)
} else {
b.sharedFieldReferences[types.BlockRoots].MinusRef()
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
rootsArr := make([][32]byte, fieldparams.BlockRootsLength)
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
b.blockRoots = rootsArr
if b.blockRootsMultiValue != nil {
b.blockRootsMultiValue.Detach(b)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(val)
b.markFieldAsDirty(types.BlockRoots)
b.rebuildTrie[types.BlockRoots] = true
@@ -53,25 +38,8 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if err := b.blockRootsMultiValue.UpdateAt(b, idx, blockRoot); err != nil {
return errors.Wrap(err, "could not update block roots")
}
} else {
if uint64(len(b.blockRoots)) <= idx {
return errors.Wrapf(consensus_types.ErrOutOfBounds, "block root index %d does not exist", idx)
}
r := b.blockRoots
if ref := b.sharedFieldReferences[types.BlockRoots]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
r = make([][32]byte, len(b.blockRoots))
copy(r, b.blockRoots)
ref.MinusRef()
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
}
r[idx] = blockRoot
b.blockRoots = r
if err := b.blockRootsMultiValue.UpdateAt(b, idx, blockRoot); err != nil {
return errors.Wrap(err, "could not update block roots")
}
b.markFieldAsDirty(types.BlockRoots)

View File
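
Note how the setters above end the same way: write through the multi-value slice, markFieldAsDirty, and, for whole-field replacement, flag the field's trie for a rebuild so the next hash-tree-root only recomputes what changed. A small, hypothetical sketch of that dirty-field bookkeeping (the real state rebuilds SSZ tries per field; this only shows the flag flow):

package main

import "fmt"

type fieldIndex int

const (
	blockRoots fieldIndex = iota
	randaoMixes
)

type beaconLike struct {
	dirtyFields map[fieldIndex]bool
	rebuildTrie map[fieldIndex]bool
}

// markFieldAsDirty is what every setter calls after a successful write.
func (b *beaconLike) markFieldAsDirty(f fieldIndex) { b.dirtyFields[f] = true }

// hashTreeRoot revisits only dirty fields and clears the flags afterwards.
func (b *beaconLike) hashTreeRoot() {
	for f := range b.dirtyFields {
		if b.rebuildTrie[f] {
			fmt.Printf("rebuilding trie for field %d\n", f)
			delete(b.rebuildTrie, f)
		} else {
			fmt.Printf("patching trie leaves for field %d\n", f)
		}
		delete(b.dirtyFields, f)
	}
}

func main() {
	b := &beaconLike{
		dirtyFields: map[fieldIndex]bool{},
		rebuildTrie: map[fieldIndex]bool{},
	}
	// SetBlockRoots-style setter: whole field replaced, rebuild its trie.
	b.markFieldAsDirty(blockRoots)
	b.rebuildTrie[blockRoots] = true
	// UpdateRandaoMixesAtIndex-style setter: only a leaf changed.
	b.markFieldAsDirty(randaoMixes)
	b.hashTreeRoot()
}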

@@ -2,10 +2,6 @@ package state_native
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/pkg/errors"
)
@@ -15,21 +11,10 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if b.randaoMixesMultiValue != nil {
b.randaoMixesMultiValue.Detach(b)
}
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(val)
} else {
b.sharedFieldReferences[types.RandaoMixes].MinusRef()
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
rootsArr := make([][32]byte, fieldparams.RandaoMixesLength)
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
b.randaoMixes = rootsArr
if b.randaoMixesMultiValue != nil {
b.randaoMixesMultiValue.Detach(b)
}
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(val)
b.markFieldAsDirty(types.RandaoMixes)
b.rebuildTrie[types.RandaoMixes] = true
@@ -39,29 +24,8 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes
// at a specific index to a new value.
func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val [32]byte) error {
if features.Get().EnableExperimentalState {
if err := b.randaoMixesMultiValue.UpdateAt(b, idx, val); err != nil {
return errors.Wrap(err, "could not update randao mixes")
}
} else {
if uint64(len(b.randaoMixes)) <= idx {
return errors.Wrapf(consensus_types.ErrOutOfBounds, "randao mix index %d does not exist", idx)
}
b.lock.Lock()
m := b.randaoMixes
if ref := b.sharedFieldReferences[types.RandaoMixes]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
m = make([][32]byte, len(b.randaoMixes))
copy(m, b.randaoMixes)
ref.MinusRef()
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
}
m[idx] = val
b.randaoMixes = m
b.lock.Unlock()
if err := b.randaoMixesMultiValue.UpdateAt(b, idx, val); err != nil {
return errors.Wrap(err, "could not update randao mixes")
}
b.lock.Lock()

View File

@@ -2,10 +2,6 @@ package state_native
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/pkg/errors"
)
@@ -15,21 +11,10 @@ func (b *BeaconState) SetStateRoots(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if b.stateRootsMultiValue != nil {
b.stateRootsMultiValue.Detach(b)
}
b.stateRootsMultiValue = NewMultiValueStateRoots(val)
} else {
b.sharedFieldReferences[types.StateRoots].MinusRef()
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
rootsArr := make([][32]byte, fieldparams.StateRootsLength)
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
b.stateRoots = rootsArr
if b.stateRootsMultiValue != nil {
b.stateRootsMultiValue.Detach(b)
}
b.stateRootsMultiValue = NewMultiValueStateRoots(val)
b.markFieldAsDirty(types.StateRoots)
b.rebuildTrie[types.StateRoots] = true
@@ -39,29 +24,8 @@ func (b *BeaconState) SetStateRoots(val [][]byte) error {
// UpdateStateRootAtIndex for the beacon state. Updates the state root
// at a specific index to a new value.
func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
if features.Get().EnableExperimentalState {
if err := b.stateRootsMultiValue.UpdateAt(b, idx, stateRoot); err != nil {
return errors.Wrap(err, "could not update state roots")
}
} else {
if uint64(len(b.stateRoots)) <= idx {
return errors.Wrapf(consensus_types.ErrOutOfBounds, "state root index %d does not exist", idx)
}
b.lock.Lock()
r := b.stateRoots
if ref := b.sharedFieldReferences[types.StateRoots]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
r = make([][32]byte, len(b.stateRoots))
copy(r, b.stateRoots)
ref.MinusRef()
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
}
r[idx] = stateRoot
b.stateRoots = r
b.lock.Unlock()
if err := b.stateRootsMultiValue.UpdateAt(b, idx, stateRoot); err != nil {
return errors.Wrap(err, "could not update state roots")
}
b.lock.Lock()

View File

@@ -4,9 +4,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -20,16 +17,10 @@ func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue != nil {
b.validatorsMultiValue.Detach(b)
}
b.validatorsMultiValue = NewMultiValueValidators(val)
} else {
b.validators = val
b.sharedFieldReferences[types.Validators].MinusRef()
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
if b.validatorsMultiValue != nil {
b.validatorsMultiValue.Detach(b)
}
b.validatorsMultiValue = NewMultiValueValidators(val)
b.markFieldAsDirty(types.Validators)
b.rebuildTrie[types.Validators] = true
@@ -41,58 +32,26 @@ func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
// validator registry.
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val state.ReadOnlyValidator) (*ethpb.Validator, error)) error {
var changedVals []uint64
if features.Get().EnableExperimentalState {
l := b.validatorsMultiValue.Len(b)
for i := 0; i < l; i++ {
v, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return err
}
ro, err := NewValidator(v)
if err != nil {
return err
}
newVal, err := f(i, ro)
if err != nil {
return err
}
if newVal != nil {
changedVals = append(changedVals, uint64(i))
if err = b.validatorsMultiValue.UpdateAt(b, uint64(i), newVal); err != nil {
return errors.Wrapf(err, "could not update validator at index %d", i)
}
l := b.validatorsMultiValue.Len(b)
for i := 0; i < l; i++ {
v, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return err
}
ro, err := NewValidator(v)
if err != nil {
return err
}
newVal, err := f(i, ro)
if err != nil {
return err
}
if newVal != nil {
changedVals = append(changedVals, uint64(i))
if err = b.validatorsMultiValue.UpdateAt(b, uint64(i), newVal); err != nil {
return errors.Wrapf(err, "could not update validator at index %d", i)
}
}
} else {
b.lock.Lock()
v := b.validators
if ref := b.sharedFieldReferences[types.Validators]; ref.Refs() > 1 {
v = b.validatorsReferences()
ref.MinusRef()
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
}
b.lock.Unlock()
for i, val := range v {
ro, err := NewValidator(val)
if err != nil {
return err
}
newVal, err := f(i, ro)
if err != nil {
return err
}
if newVal != nil {
changedVals = append(changedVals, uint64(i))
v[i] = newVal
}
}
b.lock.Lock()
b.validators = v
b.lock.Unlock()
}
b.lock.Lock()
@@ -108,27 +67,8 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val state.ReadOnlyVa
// UpdateValidatorAtIndex for the beacon state. Updates the validator
// at a specific index to a new value.
func (b *BeaconState) UpdateValidatorAtIndex(idx primitives.ValidatorIndex, val *ethpb.Validator) error {
if features.Get().EnableExperimentalState {
if err := b.validatorsMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
return errors.Wrap(err, "could not update validator")
}
} else {
if uint64(len(b.validators)) <= uint64(idx) {
return errors.Wrapf(consensus_types.ErrOutOfBounds, "validator index %d does not exist", idx)
}
b.lock.Lock()
v := b.validators
if ref := b.sharedFieldReferences[types.Validators]; ref.Refs() > 1 {
v = b.validatorsReferences()
ref.MinusRef()
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
}
v[idx] = val
b.validators = v
b.lock.Unlock()
if err := b.validatorsMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
return errors.Wrap(err, "could not update validator")
}
b.lock.Lock()
@@ -145,16 +85,10 @@ func (b *BeaconState) SetBalances(val []uint64) error {
b.lock.Lock()
defer b.lock.Unlock()
if features.Get().EnableExperimentalState {
if b.balancesMultiValue != nil {
b.balancesMultiValue.Detach(b)
}
b.balancesMultiValue = NewMultiValueBalances(val)
} else {
b.sharedFieldReferences[types.Balances].MinusRef()
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.balances = val
if b.balancesMultiValue != nil {
b.balancesMultiValue.Detach(b)
}
b.balancesMultiValue = NewMultiValueBalances(val)
b.markFieldAsDirty(types.Balances)
b.rebuildTrie[types.Balances] = true
@@ -164,27 +98,8 @@ func (b *BeaconState) SetBalances(val []uint64) error {
// UpdateBalancesAtIndex for the beacon state. This method updates the balance
// at a specific index to a new value.
func (b *BeaconState) UpdateBalancesAtIndex(idx primitives.ValidatorIndex, val uint64) error {
if features.Get().EnableExperimentalState {
if err := b.balancesMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
return errors.Wrap(err, "could not update balances")
}
} else {
if uint64(len(b.balances)) <= uint64(idx) {
return errors.Wrapf(consensus_types.ErrOutOfBounds, "balance index %d does not exist", idx)
}
b.lock.Lock()
bals := b.balances
if b.sharedFieldReferences[types.Balances].Refs() > 1 {
bals = b.balancesVal()
b.sharedFieldReferences[types.Balances].MinusRef()
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
}
bals[idx] = val
b.balances = bals
b.lock.Unlock()
if err := b.balancesMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
return errors.Wrap(err, "could not update balances")
}
b.lock.Lock()
@@ -236,25 +151,8 @@ func (b *BeaconState) UpdateSlashingsAtIndex(idx, val uint64) error {
// AppendValidator for the beacon state. Appends the new value
// to the end of list.
func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
var valIdx primitives.ValidatorIndex
if features.Get().EnableExperimentalState {
b.validatorsMultiValue.Append(b, val)
valIdx = primitives.ValidatorIndex(b.validatorsMultiValue.Len(b) - 1)
} else {
b.lock.Lock()
vals := b.validators
if b.sharedFieldReferences[types.Validators].Refs() > 1 {
vals = b.validatorsReferences()
b.sharedFieldReferences[types.Validators].MinusRef()
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
}
b.validators = append(vals, val)
valIdx = primitives.ValidatorIndex(len(b.validators) - 1)
b.lock.Unlock()
}
b.validatorsMultiValue.Append(b, val)
valIdx := primitives.ValidatorIndex(b.validatorsMultiValue.Len(b) - 1)
b.lock.Lock()
defer b.lock.Unlock()
@@ -268,26 +166,8 @@ func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
// AppendBalance for the beacon state. Appends the new value
// to the end of list.
func (b *BeaconState) AppendBalance(bal uint64) error {
var balIdx uint64
if features.Get().EnableExperimentalState {
b.balancesMultiValue.Append(b, bal)
balIdx = uint64(b.balancesMultiValue.Len(b) - 1)
} else {
b.lock.Lock()
bals := b.balances
if b.sharedFieldReferences[types.Balances].Refs() > 1 {
bals = make([]uint64, 0, len(b.balances)+int(params.BeaconConfig().MaxDeposits))
bals = append(bals, b.balances...)
b.sharedFieldReferences[types.Balances].MinusRef()
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
}
b.balances = append(bals, bal)
balIdx = uint64(len(b.balances) - 1)
b.lock.Unlock()
}
b.balancesMultiValue.Append(b, bal)
balIdx := uint64(b.balancesMultiValue.Len(b) - 1)
b.lock.Lock()
defer b.lock.Unlock()
@@ -303,22 +183,7 @@ func (b *BeaconState) AppendInactivityScore(s uint64) error {
return errNotSupported("AppendInactivityScore", b.version)
}
if features.Get().EnableExperimentalState {
b.inactivityScoresMultiValue.Append(b, s)
} else {
b.lock.Lock()
scores := b.inactivityScores
if b.sharedFieldReferences[types.InactivityScores].Refs() > 1 {
scores = make([]uint64, 0, len(b.inactivityScores)+int(params.BeaconConfig().MaxDeposits))
scores = append(scores, b.inactivityScores...)
b.sharedFieldReferences[types.InactivityScores].MinusRef()
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
b.inactivityScores = append(scores, s)
b.lock.Unlock()
}
b.inactivityScoresMultiValue.Append(b, s)
b.lock.Lock()
defer b.lock.Unlock()
@@ -337,16 +202,10 @@ func (b *BeaconState) SetInactivityScores(val []uint64) error {
return errNotSupported("SetInactivityScores", b.version)
}
if features.Get().EnableExperimentalState {
if b.inactivityScoresMultiValue != nil {
b.inactivityScoresMultiValue.Detach(b)
}
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(val)
} else {
b.sharedFieldReferences[types.InactivityScores].MinusRef()
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
b.inactivityScores = val
if b.inactivityScoresMultiValue != nil {
b.inactivityScoresMultiValue.Detach(b)
}
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(val)
b.markFieldAsDirty(types.InactivityScores)
return nil
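
Taken together, the setter hunks above drop the EnableExperimentalState branch and the legacy copy-on-write slice handling, leaving the multi-value slice as the only backing store. The following is a minimal, self-contained sketch of that remaining call shape, using hypothetical names (mvSlice, newMVSlice, state) rather than the real Prysm types:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// mvSlice stands in for the multi-value slices referenced in the diff
// (NewMultiValueBalances, UpdateAt, Append, Detach). It only mirrors the
// call shape, not the real shared/individual-value implementation.
type mvSlice[T any] struct {
	mu    sync.Mutex
	items []T
}

func newMVSlice[T any](vals []T) *mvSlice[T] {
	cp := make([]T, len(vals))
	copy(cp, vals)
	return &mvSlice[T]{items: cp}
}

// UpdateAt writes a value at idx, returning an error when idx is out of bounds.
func (m *mvSlice[T]) UpdateAt(idx uint64, val T) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if idx >= uint64(len(m.items)) {
		return errors.New("index out of bounds")
	}
	m.items[idx] = val
	return nil
}

// Append adds a value and returns its index.
func (m *mvSlice[T]) Append(val T) uint64 {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.items = append(m.items, val)
	return uint64(len(m.items) - 1)
}

// state mirrors the post-diff setters: no feature-flag branch, the
// multi-value slice is the only path.
type state struct {
	balances *mvSlice[uint64]
}

func (s *state) SetBalances(val []uint64) {
	// The real code Detach()-es the old slice first; the sketch just replaces it.
	s.balances = newMVSlice(val)
}

func (s *state) UpdateBalancesAtIndex(idx uint64, val uint64) error {
	return s.balances.UpdateAt(idx, val)
}

func main() {
	s := &state{}
	s.SetBalances([]uint64{32, 32, 31})
	if err := s.UpdateBalancesAtIndex(1, 33); err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("appended at index", s.balances.Append(32))
}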


@@ -11,7 +11,6 @@ import (
customtypes "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -116,20 +115,13 @@ var (
)
const (
phase0SharedFieldRefCount = 10
altairSharedFieldRefCount = 11
bellatrixSharedFieldRefCount = 12
capellaSharedFieldRefCount = 13
denebSharedFieldRefCount = 13
electraSharedFieldRefCount = 16
fuluSharedFieldRefCount = 17
experimentalStatePhase0SharedFieldRefCount = 5
experimentalStateAltairSharedFieldRefCount = 5
experimentalStateBellatrixSharedFieldRefCount = 6
experimentalStateCapellaSharedFieldRefCount = 7
experimentalStateDenebSharedFieldRefCount = 7
experimentalStateElectraSharedFieldRefCount = 10
experimentalStateFuluSharedFieldRefCount = 11
phase0SharedFieldRefCount = 5
altairSharedFieldRefCount = 5
bellatrixSharedFieldRefCount = 6
capellaSharedFieldRefCount = 7
denebSharedFieldRefCount = 7
electraSharedFieldRefCount = 10
fuluSharedFieldRefCount = 11
)
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
@@ -208,37 +200,12 @@ func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState,
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStatePhase0SharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, phase0SharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, phase0SharedFieldRefCount)
for _, f := range phase0Fields {
b.dirtyFields[f] = true
@@ -257,13 +224,6 @@ func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState,
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PreviousEpochAttestations] = stateutil.NewRef(1)
b.sharedFieldReferences[types.CurrentEpochAttestations] = stateutil.NewRef(1)
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -314,39 +274,13 @@ func InitializeFromProtoUnsafeAltair(st *ethpb.BeaconStateAltair) (state.BeaconS
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateAltairSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, altairSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, altairSharedFieldRefCount)
for _, f := range altairFields {
b.dirtyFields[f] = true
@@ -365,14 +299,6 @@ func InitializeFromProtoUnsafeAltair(st *ethpb.BeaconStateAltair) (state.BeaconS
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -424,39 +350,13 @@ func InitializeFromProtoUnsafeBellatrix(st *ethpb.BeaconStateBellatrix) (state.B
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateBellatrixSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
for _, f := range bellatrixFields {
b.dirtyFields[f] = true
@@ -476,14 +376,6 @@ func InitializeFromProtoUnsafeBellatrix(st *ethpb.BeaconStateBellatrix) (state.B
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.LatestExecutionPayloadHeader] = stateutil.NewRef(1) // New in Bellatrix.
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -538,39 +430,13 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateCapellaSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
for _, f := range capellaFields {
b.dirtyFields[f] = true
@@ -591,14 +457,6 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderCapella] = stateutil.NewRef(1) // New in Capella.
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1) // New in Capella.
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -651,39 +509,13 @@ func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconSta
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateDenebSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
for _, f := range denebFields {
b.dirtyFields[f] = true
@@ -704,14 +536,6 @@ func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconSta
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1) // New in Deneb.
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1)
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -773,39 +597,13 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateElectraSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, electraSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, electraSharedFieldRefCount)
for _, f := range electraFields {
b.dirtyFields[f] = true
@@ -829,14 +627,6 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1) // New in Electra.
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1) // New in Electra.
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1) // New in Electra.
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -903,39 +693,13 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateFulu) (state.BeaconState
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateFuluSharedFieldRefCount)
} else {
bRoots := make([][32]byte, fieldparams.BlockRootsLength)
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
b.blockRoots = bRoots
sRoots := make([][32]byte, fieldparams.StateRootsLength)
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
b.stateRoots = sRoots
mixes := make([][32]byte, fieldparams.RandaoMixesLength)
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
b.randaoMixes = mixes
b.balances = st.Balances
b.validators = st.Validators
b.inactivityScores = st.InactivityScores
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, fuluSharedFieldRefCount)
}
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, fuluSharedFieldRefCount)
for _, f := range fuluFields {
b.dirtyFields[f] = true
@@ -960,14 +724,6 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateFulu) (state.BeaconState
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1) // New in Fulu.
if !features.Get().EnableExperimentalState {
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
}
state.Count.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
@@ -1015,11 +771,8 @@ func (b *BeaconState) Copy() state.BeaconState {
earliestConsolidationEpoch: b.earliestConsolidationEpoch,
// Large arrays, infrequently changed, constant size.
blockRoots: b.blockRoots,
blockRootsMultiValue: b.blockRootsMultiValue,
stateRoots: b.stateRoots,
stateRootsMultiValue: b.stateRootsMultiValue,
randaoMixes: b.randaoMixes,
randaoMixesMultiValue: b.randaoMixesMultiValue,
previousEpochAttestations: b.previousEpochAttestations,
currentEpochAttestations: b.currentEpochAttestations,
@@ -1028,15 +781,12 @@ func (b *BeaconState) Copy() state.BeaconState {
proposerLookahead: b.proposerLookahead,
// Large arrays, increases over time.
balances: b.balances,
balancesMultiValue: b.balancesMultiValue,
historicalRoots: b.historicalRoots,
historicalSummaries: b.historicalSummaries,
validators: b.validators,
validatorsMultiValue: b.validatorsMultiValue,
previousEpochParticipation: b.previousEpochParticipation,
currentEpochParticipation: b.currentEpochParticipation,
inactivityScores: b.inactivityScores,
inactivityScoresMultiValue: b.inactivityScoresMultiValue,
pendingDeposits: b.pendingDeposits,
pendingPartialWithdrawals: b.pendingPartialWithdrawals,
@@ -1068,51 +818,30 @@ func (b *BeaconState) Copy() state.BeaconState {
valMapHandler: b.valMapHandler,
}
if features.Get().EnableExperimentalState {
b.blockRootsMultiValue.Copy(b, dst)
b.stateRootsMultiValue.Copy(b, dst)
b.randaoMixesMultiValue.Copy(b, dst)
b.balancesMultiValue.Copy(b, dst)
if b.version > version.Phase0 {
b.inactivityScoresMultiValue.Copy(b, dst)
}
b.validatorsMultiValue.Copy(b, dst)
b.blockRootsMultiValue.Copy(b, dst)
b.stateRootsMultiValue.Copy(b, dst)
b.randaoMixesMultiValue.Copy(b, dst)
b.balancesMultiValue.Copy(b, dst)
if b.version > version.Phase0 {
b.inactivityScoresMultiValue.Copy(b, dst)
}
b.validatorsMultiValue.Copy(b, dst)
if features.Get().EnableExperimentalState {
switch b.version {
case version.Phase0:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStatePhase0SharedFieldRefCount)
case version.Altair:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateAltairSharedFieldRefCount)
case version.Bellatrix:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateBellatrixSharedFieldRefCount)
case version.Capella:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateCapellaSharedFieldRefCount)
case version.Deneb:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateDenebSharedFieldRefCount)
case version.Electra:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateElectraSharedFieldRefCount)
case version.Fulu:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, experimentalStateFuluSharedFieldRefCount)
}
} else {
switch b.version {
case version.Phase0:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, phase0SharedFieldRefCount)
case version.Altair:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, altairSharedFieldRefCount)
case version.Bellatrix:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
case version.Capella:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
case version.Deneb:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
case version.Electra:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, electraSharedFieldRefCount)
case version.Fulu:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, fuluSharedFieldRefCount)
}
switch b.version {
case version.Phase0:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, phase0SharedFieldRefCount)
case version.Altair:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, altairSharedFieldRefCount)
case version.Bellatrix:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
case version.Capella:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
case version.Deneb:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
case version.Electra:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, electraSharedFieldRefCount)
case version.Fulu:
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, fuluSharedFieldRefCount)
}
for field, ref := range b.sharedFieldReferences {
@@ -1256,10 +985,6 @@ func (b *BeaconState) FieldReferencesCount() map[string]uint64 {
func (b *BeaconState) RecordStateMetrics() {
b.lock.RLock()
defer b.lock.RUnlock()
// Only run this for nodes running with the experimental state.
if !features.Get().EnableExperimentalState {
return
}
// Validators
if b.validatorsMultiValue != nil {
@@ -1413,11 +1138,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
case types.FinalizedCheckpoint:
return ssz.CheckpointRoot(b.finalizedCheckpoint)
case types.InactivityScores:
if features.Get().EnableExperimentalState {
return stateutil.Uint64ListRootWithRegistryLimit(b.inactivityScoresMultiValue.Value(b))
} else {
return stateutil.Uint64ListRootWithRegistryLimit(b.inactivityScores)
}
return stateutil.Uint64ListRootWithRegistryLimit(b.inactivityScoresMultiValue.Value(b))
case types.CurrentSyncCommittee:
return stateutil.SyncCommitteeRoot(b.currentSyncCommittee)
case types.NextSyncCommittee:
@@ -1561,25 +1282,23 @@ func finalizerCleanup(b *BeaconState) {
delete(b.stateFieldLeaves, i)
}
if features.Get().EnableExperimentalState {
if b.blockRootsMultiValue != nil {
b.blockRootsMultiValue.Detach(b)
}
if b.stateRootsMultiValue != nil {
b.stateRootsMultiValue.Detach(b)
}
if b.randaoMixesMultiValue != nil {
b.randaoMixesMultiValue.Detach(b)
}
if b.balancesMultiValue != nil {
b.balancesMultiValue.Detach(b)
}
if b.inactivityScoresMultiValue != nil {
b.inactivityScoresMultiValue.Detach(b)
}
if b.validatorsMultiValue != nil {
b.validatorsMultiValue.Detach(b)
}
if b.blockRootsMultiValue != nil {
b.blockRootsMultiValue.Detach(b)
}
if b.stateRootsMultiValue != nil {
b.stateRootsMultiValue.Detach(b)
}
if b.randaoMixesMultiValue != nil {
b.randaoMixesMultiValue.Detach(b)
}
if b.balancesMultiValue != nil {
b.balancesMultiValue.Detach(b)
}
if b.inactivityScoresMultiValue != nil {
b.inactivityScoresMultiValue.Detach(b)
}
if b.validatorsMultiValue != nil {
b.validatorsMultiValue.Detach(b)
}
state.Count.Sub(1)
@@ -1587,145 +1306,94 @@ func finalizerCleanup(b *BeaconState) {
func (b *BeaconState) blockRootsRootSelector(field types.FieldIndex) ([32]byte, error) {
if b.rebuildTrie[field] {
if features.Get().EnableExperimentalState {
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.blockRootsMultiValue,
}, fieldparams.BlockRootsLength)
if err != nil {
return [32]byte{}, err
}
} else {
err := b.resetFieldTrie(field, b.blockRoots, fieldparams.BlockRootsLength)
if err != nil {
return [32]byte{}, err
}
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.blockRootsMultiValue,
}, fieldparams.BlockRootsLength)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
if features.Get().EnableExperimentalState {
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.blockRootsMultiValue,
})
} else {
return b.recomputeFieldTrie(field, b.blockRoots)
}
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.blockRootsMultiValue,
})
}
func (b *BeaconState) stateRootsRootSelector(field types.FieldIndex) ([32]byte, error) {
if b.rebuildTrie[field] {
if features.Get().EnableExperimentalState {
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.stateRootsMultiValue,
}, fieldparams.StateRootsLength)
if err != nil {
return [32]byte{}, err
}
} else {
err := b.resetFieldTrie(field, b.stateRoots, fieldparams.StateRootsLength)
if err != nil {
return [32]byte{}, err
}
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.stateRootsMultiValue,
}, fieldparams.StateRootsLength)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
if features.Get().EnableExperimentalState {
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.stateRootsMultiValue,
})
} else {
return b.recomputeFieldTrie(field, b.stateRoots)
}
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.stateRootsMultiValue,
})
}
func (b *BeaconState) validatorsRootSelector(field types.FieldIndex) ([32]byte, error) {
if b.rebuildTrie[field] {
if features.Get().EnableExperimentalState {
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: b,
MultiValueSlice: b.validatorsMultiValue,
}, fieldparams.ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, err
}
} else {
err := b.resetFieldTrie(field, b.validators, fieldparams.ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, err
}
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: b,
MultiValueSlice: b.validatorsMultiValue,
}, fieldparams.ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
if features.Get().EnableExperimentalState {
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: b,
MultiValueSlice: b.validatorsMultiValue,
})
} else {
return b.recomputeFieldTrie(field, b.validators)
}
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[*ethpb.Validator]{
Identifiable: b,
MultiValueSlice: b.validatorsMultiValue,
})
}
func (b *BeaconState) balancesRootSelector(field types.FieldIndex) ([32]byte, error) {
if b.rebuildTrie[field] {
if features.Get().EnableExperimentalState {
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[uint64]{
Identifiable: b,
MultiValueSlice: b.balancesMultiValue,
}, stateutil.ValidatorLimitForBalancesChunks())
if err != nil {
return [32]byte{}, err
}
} else {
err := b.resetFieldTrie(field, b.balances, stateutil.ValidatorLimitForBalancesChunks())
if err != nil {
return [32]byte{}, err
}
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[uint64]{
Identifiable: b,
MultiValueSlice: b.balancesMultiValue,
}, stateutil.ValidatorLimitForBalancesChunks())
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
if features.Get().EnableExperimentalState {
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[uint64]{
Identifiable: b,
MultiValueSlice: b.balancesMultiValue,
})
} else {
return b.recomputeFieldTrie(field, b.balances)
}
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[uint64]{
Identifiable: b,
MultiValueSlice: b.balancesMultiValue,
})
}
func (b *BeaconState) randaoMixesRootSelector(field types.FieldIndex) ([32]byte, error) {
if b.rebuildTrie[field] {
if features.Get().EnableExperimentalState {
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.randaoMixesMultiValue,
}, fieldparams.RandaoMixesLength)
if err != nil {
return [32]byte{}, err
}
} else {
err := b.resetFieldTrie(field, b.randaoMixes, fieldparams.RandaoMixesLength)
if err != nil {
return [32]byte{}, err
}
err := b.resetFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.randaoMixesMultiValue,
}, fieldparams.RandaoMixesLength)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
if features.Get().EnableExperimentalState {
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.randaoMixesMultiValue,
})
} else {
return b.recomputeFieldTrie(field, b.randaoMixes)
}
return b.recomputeFieldTrie(field, mvslice.MultiValueSliceComposite[[32]byte]{
Identifiable: b,
MultiValueSlice: b.randaoMixesMultiValue,
})
}
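
The refactor above also shrinks the shared-field reference counts, since the block roots, state roots, randao mixes, balances, validators, and inactivity scores no longer live behind sharedFieldReferences. The fields that remain still rely on the stateutil.NewRef / MinusRef / Refs pattern visible throughout the diff. The following is a rough, self-contained sketch of that counter (hypothetical ref/newRef names; the real type lives in beacon-chain/state/stateutil and may differ in detail):

package main

import "fmt"

// ref sketches a shared-field reference counter.
type ref struct{ n uint }

func newRef(n uint) *ref  { return &ref{n: n} }
func (r *ref) Refs() uint { return r.n }
func (r *ref) AddRef()    { r.n++ }
func (r *ref) MinusRef() {
	if r.n > 0 {
		r.n--
	}
}

func main() {
	// A state copy shares the field and bumps the counter.
	shared := newRef(1)
	shared.AddRef()

	// A mutation checks Refs() > 1 to decide whether the field must be
	// copied before being written (copy-on-write), then drops its claim
	// on the shared value and starts a fresh counter for its own copy.
	if shared.Refs() > 1 {
		shared.MinusRef()
		shared = newRef(1)
	}
	fmt.Println("refs held by the mutating state:", shared.Refs())
}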


@@ -6,7 +6,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -805,10 +804,6 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) {
}
func TestBeaconState_InitializeInactivityScoresCorrectly_Deneb(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableExperimentalState: true,
})
defer resetCfg()
st, _ := util.DeterministicGenesisStateDeneb(t, 200)
_, err := st.HashTreeRoot(t.Context())
require.NoError(t, err)


@@ -151,7 +151,7 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo primitives.S
"startSlot": s.Slot(),
"endSlot": replayTo,
"diff": replayTo - s.Slot(),
}).Debug("calling process_slots on remaining slots")
}).Debug("Calling process_slots on remaining slots")
// err will be handled after the bookend log
s, err = ReplayProcessSlots(ctx, s, replayTo)
@@ -161,7 +161,7 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo primitives.S
duration := time.Since(start)
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("time spent in process_slots")
}).Debug("Time spent in process_slots")
replayToSlotSummary.Observe(float64(duration.Milliseconds()))
return s, nil


@@ -59,7 +59,7 @@ func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int,
for i := 0; i < groupSize; i++ {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("could not get validator field roots")
logrus.WithError(err).Error("Could not get validator field roots")
return
}
for k, root := range fRoots {


@@ -15,5 +15,5 @@ func VerifyBeaconStateValidatorAtIndexReadOnlyHandlesNilSlice(t *testing.T, fact
require.NoError(t, err)
_, err = st.ValidatorAtIndexReadOnly(0)
assert.Equal(t, state.ErrNilValidatorsInState, err)
assert.ErrorContains(t, "index 0 out of bounds", err)
}


@@ -83,6 +83,15 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
return errors.Wrap(err, "save data column sidecars")
}
slotStartTime := slots.StartTime(uint64(s.cfg.clock.GenesisTime().Unix()), slot)
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"fromColumnsCount": storedColumnsCount,
"sinceSlotStartTime": time.Since(slotStartTime),
"reconstructionAndSaveDuration": time.Since(startTime),
}).Debug("Data columns reconstructed and saved")
// Update reconstruction metrics
dataColumnReconstructionHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))
@@ -92,12 +101,6 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"fromColumnsCount": storedColumnsCount,
}).Debug("Data columns reconstructed and saved")
return nil
}
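
The hunk above moves the "Data columns reconstructed and saved" log so it is emitted right after the save and enriches it with timing fields. A rough illustration of that logging shape follows; logrus is the real dependency, but the field values and the slot-start stand-in here are assumptions for the sake of a runnable snippet:

package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	startTime := time.Now()
	// Stand-in for slots.StartTime(genesis, slot) in the real code.
	slotStartTime := startTime.Add(-2 * time.Second)

	// ... reconstruction and save of the sidecars would happen here ...

	logrus.WithFields(logrus.Fields{
		"fromColumnsCount":              64,
		"sinceSlotStartTime":            time.Since(slotStartTime),
		"reconstructionAndSaveDuration": time.Since(startTime),
	}).Debug("Data columns reconstructed and saved")
}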


@@ -365,7 +365,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
robs, err := sortedBlockWithVerifiedBlobSlice(blocks)
if err != nil {
log.WithField("peer", p).WithError(err).Debug("invalid BeaconBlocksByRange response")
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlocksByRange response")
continue
}
if len(features.Get().BlacklistedRoots) > 0 {


@@ -106,7 +106,7 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
blocksFetcher := cfg.blocksFetcher
if blocksFetcher == nil {
if cfg.bs == nil {
log.Warn("rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
log.Warn("Rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
}
blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
ctxMap: cfg.ctxMap,


@@ -153,7 +153,7 @@ func (s *Service) processFetchedData(ctx context.Context, data *blocksQueueFetch
func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQueueFetchedData) (uint64, error) {
bwb, err := validUnprocessed(ctx, data.bwb, s.cfg.Chain.HeadSlot(), s.isProcessedBlock)
if err != nil {
log.WithError(err).Debug("batch did not contain a valid sequence of unprocessed blocks")
log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
return 0, err
}
if len(bwb) == 0 {


@@ -130,7 +130,7 @@ func (s *Service) Start() {
log.Info("Waiting for state to be initialized")
clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("initial-sync failed to receive startup event")
log.WithError(err).Error("Initial-sync failed to receive startup event")
return
}
s.clock = clock
@@ -138,7 +138,7 @@ func (s *Service) Start() {
ctxMap, err := sync.ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
if err != nil {
log.WithField("genesisValidatorRoot", clock.GenesisValidatorsRoot()).
WithError(err).Error("unable to initialize context version map using genesis validator")
WithError(err).Error("Unable to initialize context version map using genesis validator")
return
}
s.ctxMap = ctxMap


@@ -231,7 +231,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.LogsContain(t, hook, "Waiting for state to be initialized")
assert.LogsContain(t, hook, "initial-sync failed to receive startup event")
assert.LogsContain(t, hook, "Initial-sync failed to receive startup event")
assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed")
})


@@ -54,7 +54,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
"endSlot": rp.end,
"size": rp.size,
"current": s.cfg.clock.CurrentSlot(),
}).Debug("error in validating range availability")
}).Debug("Error in validating range availability")
s.writeErrorResponseToStream(responseCodeResourceUnavailable, p2ptypes.ErrResourceUnavailable.Error(), stream)
tracing.AnnotateError(span, err)
return nil
@@ -78,7 +78,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
defer ticker.Stop()
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
if err != nil {
log.WithError(err).Info("error in BlocksByRange batch")
log.WithError(err).Info("Error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err


@@ -92,7 +92,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
if sc.Slot() < minReqSlot {
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
log.WithError(types.ErrBlobLTMinRequest).
Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
Debugf("Requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
return types.ErrBlobLTMinRequest
}


@@ -110,7 +110,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
}
if err := batch.error(); err != nil {
log.WithError(err).Debug("error in DataColumnSidecarsByRange batch")
log.WithError(err).Debug("Error in DataColumnSidecarsByRange batch")
// If we hit a rate limit, the error response has already been written, and the stream is already closed.
if !errors.Is(err, p2ptypes.ErrRateLimited) {

Some files were not shown because too many files have changed in this diff.