Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 22:07:59 -05:00
Compare commits: v6.1.1...hdiff-fuzz (12 commits)

| SHA1 |
|---|
| 60de0e5097 |
| 603aaa8f86 |
| 8c5f7cf1fa |
| 15c610fdbd |
| c62395960a |
| 18efd620dc |
| 6139d58fa5 |
| 0ea5e2cf9d |
| 29fe707143 |
| d68196822b |
| 924fe4de98 |
| fe9dd255c7 |
.bazelrc (1 change)
@@ -34,6 +34,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
build:release --strip=always

# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g
.github/workflows/go.yml (vendored, 1 change)
@@ -56,7 +56,6 @@ jobs:
uses: golangci/golangci-lint-action@v8
with:
version: v2.4
only-new-issues: true

build:
name: Build
@@ -2,72 +2,13 @@ version: "2"
run:
go: 1.23.5
linters:
default: all
disable:
- asasalint
- bodyclose
- containedctx
- contextcheck
- cyclop
- depguard
- dogsled
- dupl
- durationcheck
- err113
- errname
- exhaustive
- exhaustruct
- forbidigo
- forcetypeassert
- funlen
- gochecknoglobals
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- godox
- gomoddirectives
- gosec
enable:
- errcheck
- ineffassign
- govet
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
- nilnil
- nlreturn
- noctx
- noinlineerr
- nolintlint
- nonamedreturns
- nosprintfhostport
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
disable:
- staticcheck
- tagalign
- tagliatelle
- thelper
- unparam
- usetesting
- varnamelen
- wrapcheck
- wsl
settings:
gocognit:
min-complexity: 65
- unused
exclusions:
generated: lax
presets:
@@ -83,6 +24,7 @@ linters:
- third_party$
- builtin$
- examples$

formatters:
enable:
- gofmt
@@ -96,4 +38,4 @@ formatters:
- tools/analyzers
- third_party$
- builtin$
- examples$
- examples$
@@ -16,7 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -218,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er

// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
// stVersion should represent the version of the pre-state; header should also be from the pre-state.
func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if stVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
@@ -268,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

switch {
case err == nil:
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
}
logFields := logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"root": fmt.Sprintf("%#x", blk.Root()),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
log.WithFields(logFields).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
log.WithFields(logFields).WithError(err).Error("Invalid payload status")
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
lastValidHash: bytesutil.ToBytes32(lastValidHash),
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}

// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
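notifyNewPayload now takes a `blocktypes.ROBlock`, which carries the block root alongside the signed block, so the notification path can log the root without recomputing it. A minimal call-site sketch, assuming a service `s`, a pre-state version and header, and a signed block `signed` are already in scope (this mirrors how the updated tests call it):

```go
// Wrap the signed beacon block into an ROBlock before notifying the execution engine.
rob, err := consensusblocks.NewROBlock(signed)
if err != nil {
	return err
}
valid, err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, rob)
if err != nil {
	return err
}
_ = valid // true means the execution engine returned VALID for this payload
```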
@@ -481,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
a := util.NewBeaconBlockAltair()
altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
require.NoError(t, err)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.Body.ExecutionPayload.BlockNumber = 1
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
@@ -544,12 +523,6 @@ func Test_NotifyNewPayload(t *testing.T) {
blk: altairBlk,
isValidPayload: true,
},
{
name: "nil beacon block",
postState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
@@ -576,15 +549,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "altair pre state, happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -595,24 +561,7 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "not at merge transition",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -623,15 +572,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -642,15 +584,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "undefined error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -662,15 +597,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -701,7 +629,9 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
rob, err := consensusblocks.NewROBlock(tt.blk)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.invalidBlock {
@@ -725,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
ctx := tr.ctx

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(bellatrixBlk)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -752,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
service.cfg.ExecutionEngineCaller = e
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
require.NoError(t, err)
require.Equal(t, true, validated)
}
@@ -12,6 +12,46 @@ import (
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
)

// ConvertToAltair converts a Phase 0 beacon state to an Altair beacon state.
func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}
newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
if err != nil {
return nil, err
}
return newState, nil
}

// UpgradeToAltair updates input state to return the version Altair state.
//
// Spec code:
@@ -64,39 +104,7 @@ import (
// post.next_sync_committee = get_next_sync_committee(post)
// return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}

newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
newState, err := ConvertToAltair(state)
if err != nil {
return nil, err
}
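With ConvertToAltair factored out, the upgrade path reduces to a field-by-field conversion followed by the Altair-specific steps from the spec excerpt above (participation translation and sync-committee computation). A condensed sketch of that shape, with the fork-specific steps elided:

```go
// Sketch of the refactored upgrade: delegate the raw field copy to ConvertToAltair,
// then run the remaining Altair fork logic on the converted state.
func UpgradeToAltair(ctx context.Context, st state.BeaconState) (state.BeaconState, error) {
	newState, err := ConvertToAltair(st)
	if err != nil {
		return nil, err
	}
	// ... translate participation and set current/next sync committees per the spec ...
	return newState, nil
}
```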
@@ -15,6 +15,129 @@ import (
"github.com/pkg/errors"
)

// ConvertToElectra converts a Deneb beacon state to an Electra beacon state. It does not perform any fork logic.
func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
excessBlobGas, err := payloadHeader.ExcessBlobGas()
if err != nil {
return nil, err
}
blobGasUsed, err := payloadHeader.BlobGasUsed()
if err != nil {
return nil, err
}

s := &ethpb.BeaconStateElectra{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: beaconState.HistoricalRoots(),
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,

DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositBalanceToConsume: 0,
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}

// need to cast the beaconState to use in helper functions
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
}
return post, nil
}

// UpgradeToElectra updates inputs a generic state to return the version Electra state.
//
// nolint:dupword
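ConvertToElectra performs only the container conversion; the remaining fork logic stays in UpgradeToElectra. A minimal call sketch (the caller shown here is an assumption, not code from this change):

```go
// Copy a Deneb-shaped state into an Electra state container without running fork logic.
post, err := electra.ConvertToElectra(preState)
if err != nil {
	return nil, errors.Wrap(err, "could not convert state to electra")
}
return post, nil
```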
@@ -7,6 +7,7 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//consensus-types/hdiff:__subpackages__",
"//testing/spectest:__subpackages__",
"//validator/client:__pkg__",
],

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -17,6 +18,25 @@ import (
// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
s, err := ConvertToFulu(beaconState)
if err != nil {
return nil, errors.Wrap(err, "could not convert to fulu")
}
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
if err != nil {
return nil, err
}
pl := make([]primitives.ValidatorIndex, len(proposerLookahead))
for i, v := range proposerLookahead {
pl[i] = primitives.ValidatorIndex(v)
}
if err := s.SetProposerLookahead(pl); err != nil {
return nil, errors.Wrap(err, "failed to set proposer lookahead")
}
return s, nil
}

func ConvertToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
@@ -105,11 +125,6 @@ func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.Be
if err != nil {
return nil, err
}
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
if err != nil {
return nil, err
}

s := &ethpb.BeaconStateFulu{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
@@ -171,14 +186,6 @@ func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.Be
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
ProposerLookahead: proposerLookahead,
}

// Need to cast the beaconState to use in helper functions
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post fulu beaconState")
}

return post, nil
return state_native.InitializeFromProtoUnsafeFulu(s)
}
@@ -4,17 +4,26 @@ import (
"bytes"
"context"
"encoding/binary"
"time"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)

var errTimeOut = errors.New("operation timed out")

// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
_ context.Context, maxEpoch primitives.Epoch,
ctx context.Context, maxEpoch primitives.Epoch,
) (numPruned uint, err error) {
// In some cases, pruning may take a very long time and consume significant memory in the
// open Update transaction. Therefore, we impose a 1 minute timeout on this operation.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()

// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(encodedEndPruneEpoch, uint64(maxEpoch))
@@ -48,13 +57,18 @@ func (s *Store) PruneAttestationsAtEpoch(
return
}

if err = s.db.Update(func(tx *bolt.Tx) error {
err = s.db.Update(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
c := signingRootsBkt.Cursor()

// We begin a pruning iteration starting from the first item in the bucket.
for k, v := c.First(); k != nil; k, v = c.Next() {
if ctx.Err() != nil {
// Exit the routine if the context has expired.
return errTimeOut
}

// We check the epoch from the current key in the database.
// If we have hit an epoch that is greater than the end epoch of the pruning process,
// we then completely exit the process as we are done.
@@ -67,18 +81,27 @@ func (s *Store) PruneAttestationsAtEpoch(
// so it is possible we have a few adjacent objects that have the same slot, such as
// (target_epoch = 3 ++ _) => encode(attestation)
if err := signingRootsBkt.Delete(k); err != nil {
return err
return errors.Wrap(err, "delete attestation signing root")
}
if err := attRecordsBkt.Delete(v); err != nil {
return err
return errors.Wrap(err, "delete attestation record")
}
slasherAttestationsPrunedTotal.Inc()
numPruned++
}
return nil
}); err != nil {
})

if errors.Is(err, errTimeOut) {
log.Warning("Aborting pruning routine")
return
}

if err != nil {
log.WithError(err).Error("Failed to prune attestations")
return
}

return
}
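The pruning routine now bounds its single bolt `Update` transaction with a one-minute context deadline and a sentinel error, so a slow prune aborts instead of holding a very large write transaction open. A condensed sketch of the pattern (bucket iteration and metrics elided; `pruneWithDeadline` is an illustrative name, not the real function):

```go
import (
	"context"
	"errors"
	"time"

	bolt "go.etcd.io/bbolt"
)

var errTimeOut = errors.New("operation timed out")

// pruneWithDeadline aborts the write transaction once the context deadline passes.
func pruneWithDeadline(ctx context.Context, db *bolt.DB) error {
	ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
	defer cancel()

	err := db.Update(func(tx *bolt.Tx) error {
		// ... cursor over the bucket, deleting entries, checking ctx.Err() each step ...
		if ctx.Err() != nil {
			return errTimeOut // returning an error ends the Update transaction
		}
		return nil
	})
	if errors.Is(err, errTimeOut) {
		// Timed out: stop pruning without treating it as a hard failure.
		return nil
	}
	return err
}
```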
@@ -142,10 +142,9 @@ var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
defer func() {
defer func(start time.Time) {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
}(time.Now())

d := time.Now().Add(time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second)
ctx, cancel := context.WithDeadline(ctx, d)
@@ -183,7 +182,10 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
return nil, errors.New("unknown execution data type")
}
if result.ValidationError != "" {
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
log.WithField("status", result.Status.String()).
WithField("parentRoot", fmt.Sprintf("%#x", parentBlockRoot)).
WithError(errors.New(result.ValidationError)).
Error("Got a validation error in newPayload")
}
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
@@ -195,7 +197,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
case pb.PayloadStatus_VALID:
return result.LatestValidHash, nil
default:
return nil, ErrUnknownPayloadStatus
return nil, errors.Wrapf(ErrUnknownPayloadStatus, "unknown payload status: %s", result.Status.String())
}
}

@@ -928,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(BlockByNumberMethod, func(t *testing.T) {
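The latency change moves the `time.Now()` capture into the deferred call's argument list; the duration is still measured from function entry because deferred arguments are evaluated at the point of the `defer` statement. A minimal illustration of the pattern (the `observe` callback stands in for the Prometheus histogram used here):

```go
import "time"

// Deferred arguments are evaluated at the defer statement, so time.Now() here
// captures the start; the closure body runs when the function returns.
func timedCall(observe func(ms float64), work func()) {
	defer func(start time.Time) {
		observe(float64(time.Since(start).Milliseconds()))
	}(time.Now())
	work()
}
```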
@@ -27,5 +27,7 @@ go_test(
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)
@@ -132,6 +132,10 @@ func convertValueForJSON(v reflect.Value, tag string) interface{} {
}
return m

// ===== String =====
case reflect.String:
return v.String()

// ===== Default =====
default:
log.WithFields(log.Fields{
@@ -8,6 +8,7 @@ import (
"math"
"net/http"
"net/http/httptest"
"reflect"
"testing"

"github.com/OffchainLabs/prysm/v6/api/server/structs"
@@ -17,6 +18,8 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
log "github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestGetDepositContract(t *testing.T) {
@@ -715,3 +718,35 @@ func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
_, exists := data["BLOB_SCHEDULE"]
require.Equal(t, false, exists)
}

func TestConvertValueForJSON_NoErrorLogsForStrings(t *testing.T) {
logHook := logTest.NewLocal(log.StandardLogger())
defer logHook.Reset()

stringTestCases := []struct {
tag string
value string
}{
{"CONFIG_NAME", "mainnet"},
{"PRESET_BASE", "mainnet"},
{"DEPOSIT_CONTRACT_ADDRESS", "0x00000000219ab540356cBB839Cbe05303d7705Fa"},
{"TERMINAL_TOTAL_DIFFICULTY", "58750000000000000000000"},
}

for _, tc := range stringTestCases {
t.Run(tc.tag, func(t *testing.T) {
logHook.Reset()

// Convert the string value
v := reflect.ValueOf(tc.value)
result := convertValueForJSON(v, tc.tag)

// Verify the result is correct
require.Equal(t, tc.value, result)

// Verify NO error was logged about unsupported field kind
require.LogsDoNotContain(t, logHook, "Unsupported config field kind")
require.LogsDoNotContain(t, logHook, "kind=string")
})
}
}
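The test above pins down the intended behavior of the new `reflect.String` case: string-valued spec fields pass through unchanged and no longer hit the default branch's error log. In-package, the call looks like:

```go
// String config values are now returned as-is by convertValueForJSON.
v := convertValueForJSON(reflect.ValueOf("mainnet"), "CONFIG_NAME")
// v == "mainnet", and no "Unsupported config field kind" entry is logged.
_ = v
```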
@@ -266,6 +266,8 @@ type WriteOnlyEth1Data interface {
SetEth1DepositIndex(val uint64) error
ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (primitives.Epoch, error)
ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error)
SetExitBalanceToConsume(val primitives.Gwei) error
SetEarliestExitEpoch(val primitives.Epoch) error
}

// WriteOnlyValidators defines a struct which only has write access to validators methods.
@@ -333,6 +335,7 @@ type WriteOnlyWithdrawals interface {
DequeuePendingPartialWithdrawals(num uint64) error
SetNextWithdrawalIndex(i uint64) error
SetNextWithdrawalValidatorIndex(i primitives.ValidatorIndex) error
SetPendingPartialWithdrawals(val []*ethpb.PendingPartialWithdrawal) error
}

type WriteOnlyConsolidations interface {
@@ -91,3 +91,33 @@ func (b *BeaconState) exitEpochAndUpdateChurn(totalActiveBalance primitives.Gwei

return b.earliestExitEpoch, nil
}

// SetExitBalanceToConsume sets the exit balance to consume. This method mutates the state.
func (b *BeaconState) SetExitBalanceToConsume(exitBalanceToConsume primitives.Gwei) error {
if b.version < version.Electra {
return errNotSupported("SetExitBalanceToConsume", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

b.exitBalanceToConsume = exitBalanceToConsume
b.markFieldAsDirty(types.ExitBalanceToConsume)

return nil
}

// SetEarliestExitEpoch sets the earliest exit epoch. This method mutates the state.
func (b *BeaconState) SetEarliestExitEpoch(earliestExitEpoch primitives.Epoch) error {
if b.version < version.Electra {
return errNotSupported("SetEarliestExitEpoch", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

b.earliestExitEpoch = earliestExitEpoch
b.markFieldAsDirty(types.EarliestExitEpoch)

return nil
}
@@ -100,3 +100,24 @@ func (b *BeaconState) DequeuePendingPartialWithdrawals(n uint64) error {

return nil
}

// SetPendingPartialWithdrawals sets the pending partial withdrawals. This method mutates the state.
func (b *BeaconState) SetPendingPartialWithdrawals(pendingPartialWithdrawals []*eth.PendingPartialWithdrawal) error {
if b.version < version.Electra {
return errNotSupported("SetPendingPartialWithdrawals", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

if pendingPartialWithdrawals == nil {
return errors.New("cannot set nil pending partial withdrawals")
}
b.sharedFieldReferences[types.PendingPartialWithdrawals].MinusRef()
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)

b.pendingPartialWithdrawals = pendingPartialWithdrawals
b.markFieldAsDirty(types.PendingPartialWithdrawals)

return nil
}
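Together with the interface additions above, these setters give post-Electra callers write access to churn and withdrawal-queue fields. An illustrative sequence (the variables and the calling context are assumptions, not code from this change):

```go
// Apply externally sourced values onto a mutable Electra+ state.
if err := st.SetExitBalanceToConsume(primitives.Gwei(exitBalanceToConsume)); err != nil {
	return err
}
if err := st.SetEarliestExitEpoch(primitives.Epoch(earliestExitEpoch)); err != nil {
	return err
}
if err := st.SetPendingPartialWithdrawals(pendingPartialWithdrawals); err != nil {
	return err
}
```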
@@ -650,6 +650,11 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateFulu) (state.BeaconState
for i, v := range st.ProposerLookahead {
proposerLookahead[i] = primitives.ValidatorIndex(v)
}
// Proposer lookahead must be exactly 2 * SLOTS_PER_EPOCH in length. We fill in with zeroes instead of erroring out here
for i := len(proposerLookahead); i < 2*fieldparams.SlotsPerEpoch; i++ {
proposerLookahead = append(proposerLookahead, 0)
}

fieldCount := params.BeaconConfig().BeaconStateFuluFieldCount
b := &BeaconState{
version: version.Fulu,
changelog/james-prysm_fix-config-parsing.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fixed "Unsupported config field kind; value forwarded verbatim" errors for type string.

changelog/kasey_log-invalid-ee-root.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Ignored
- Adding context to logs around execution engine notification failures.

changelog/potuz_hdiff_diff_type.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add native state diff type and marshalling functions

changelog/pvl-slasherkv-timeout.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Added a 1 minute timeout on PruneAttestationsAtEpoch operations to prevent very large bolt transactions

changelog/pvl-strip.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Bazel builds with `--config=release` now properly apply `--strip=always` to strip debug symbols from the release assets.

changelog/sahil-4555-refactor-to-reflect-typefor.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Replaced reflect.TypeOf with reflect.TypeFor

changelog/syjn99_ssz-ql-nested-list.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- SSZ-QL: Support nested `List` type (e.g., `ExecutionPayload.Transactions`)

changelog/ttsao_simplify-golangci-config.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Update golangci-lint config to enable only basic linters that currently pass
@@ -42,6 +42,12 @@ func NewWrappedExecutionData(v proto.Message) (interfaces.ExecutionData, error)
return WrappedExecutionPayloadDeneb(pbStruct.Payload)
case *enginev1.ExecutionBundleFulu:
return WrappedExecutionPayloadDeneb(pbStruct.Payload)
case *enginev1.ExecutionPayloadHeader:
return WrappedExecutionPayloadHeader(pbStruct)
case *enginev1.ExecutionPayloadHeaderCapella:
return WrappedExecutionPayloadHeaderCapella(pbStruct)
case *enginev1.ExecutionPayloadHeaderDeneb:
return WrappedExecutionPayloadHeaderDeneb(pbStruct)
default:
return nil, errors.Wrapf(ErrUnsupportedVersion, "type %T", pbStruct)
}
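The new switch cases let header protos flow through the same constructor as full payloads. A minimal sketch (the header literal is illustrative):

```go
// Wrap an execution payload header proto into the generic ExecutionData interface.
header := &enginev1.ExecutionPayloadHeaderDeneb{ /* fields elided */ }
execData, err := blocks.NewWrappedExecutionData(header)
if err != nil {
	return err
}
_ = execData // usable wherever interfaces.ExecutionData is expected
```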
consensus-types/hdiff/BUILD.bazel (new file, 56 lines)
@@ -0,0 +1,56 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["state_diff.go"],
importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/hdiff",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/capella:go_default_library",
"//beacon-chain/core/deneb:go_default_library",
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/helpers:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"fuzz_test.go",
"property_test.go",
"security_test.go",
"state_diff_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
consensus-types/hdiff/fuzz_test.go (new file, 491 lines)
@@ -0,0 +1,491 @@
package hdiff

import (
"context"
"encoding/binary"
"strconv"
"strings"
"testing"

ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

// FuzzNewHdiff tests parsing variations of realistic diffs
func FuzzNewHdiff(f *testing.F) {
// Add seed corpus with various valid diffs from realistic scenarios
sizes := []uint64{8, 16, 32}
for _, size := range sizes {
source, _ := util.DeterministicGenesisStateElectra(f, size)

// Create various realistic target states
scenarios := []string{"slot_change", "balance_change", "validator_change", "multiple_changes"}
for _, scenario := range scenarios {
target := source.Copy()

switch scenario {
case "slot_change":
_ = target.SetSlot(source.Slot() + 1)
case "balance_change":
balances := target.Balances()
if len(balances) > 0 {
balances[0] += 1000000000
_ = target.SetBalances(balances)
}
case "validator_change":
validators := target.Validators()
if len(validators) > 0 {
validators[0].EffectiveBalance += 1000000000
_ = target.SetValidators(validators)
}
case "multiple_changes":
_ = target.SetSlot(source.Slot() + 5)
balances := target.Balances()
validators := target.Validators()
if len(balances) > 0 && len(validators) > 0 {
balances[0] += 2000000000
validators[0].EffectiveBalance += 1000000000
_ = target.SetBalances(balances)
_ = target.SetValidators(validators)
}
}

validDiff, err := Diff(source, target)
if err == nil {
f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
}
}
}

f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
// Limit input sizes to reasonable bounds
if len(stateDiff) > 5000 || len(validatorDiffs) > 5000 || len(balancesDiff) > 5000 {
return
}

input := HdiffBytes{
StateDiff: stateDiff,
ValidatorDiffs: validatorDiffs,
BalancesDiff: balancesDiff,
}

// Test parsing - should not panic even with corrupted but bounded data
_, err := newHdiff(input)
_ = err // Expected to fail with corrupted data
})
}

// FuzzNewStateDiff tests the newStateDiff function with random compressed input
func FuzzNewStateDiff(f *testing.F) {
// Add seed corpus
source, _ := util.DeterministicGenesisStateElectra(f, 16)
target := source.Copy()
_ = target.SetSlot(source.Slot() + 5)

diff, err := diffToState(source, target)
if err == nil {
serialized := diff.serialize()
f.Add(serialized)
}

// Add edge cases
f.Add([]byte{})
f.Add([]byte{0x01})
f.Add([]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07})

f.Fuzz(func(t *testing.T, data []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("newStateDiff panicked: %v", r)
}
}()

// Should never panic, only return error
_, err := newStateDiff(data)
_ = err
})
}

// FuzzNewValidatorDiffs tests validator diff deserialization
func FuzzNewValidatorDiffs(f *testing.F) {
// Add seed corpus
source, _ := util.DeterministicGenesisStateElectra(f, 8)
target := source.Copy()
vals := target.Validators()
if len(vals) > 0 {
modifiedVal := &ethpb.Validator{
PublicKey: vals[0].PublicKey,
WithdrawalCredentials: vals[0].WithdrawalCredentials,
EffectiveBalance: vals[0].EffectiveBalance + 1000,
Slashed: !vals[0].Slashed,
ActivationEligibilityEpoch: vals[0].ActivationEligibilityEpoch,
ActivationEpoch: vals[0].ActivationEpoch,
ExitEpoch: vals[0].ExitEpoch,
WithdrawableEpoch: vals[0].WithdrawableEpoch,
}
vals[0] = modifiedVal
_ = target.SetValidators(vals)

// Create a simple diff for fuzzing - we'll just use raw bytes
_, err := diffToVals(source, target)
if err == nil {
// Add some realistic validator diff bytes for the corpus
f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple validator diff
}
}

// Add edge cases
f.Add([]byte{})
f.Add([]byte{0x01, 0x02, 0x03, 0x04})

f.Fuzz(func(t *testing.T, data []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("newValidatorDiffs panicked: %v", r)
}
}()

_, err := newValidatorDiffs(data)
_ = err
})
}

// FuzzNewBalancesDiff tests balance diff deserialization
func FuzzNewBalancesDiff(f *testing.F) {
// Add seed corpus
source, _ := util.DeterministicGenesisStateElectra(f, 8)
target := source.Copy()
balances := target.Balances()
if len(balances) > 0 {
balances[0] += 1000
_ = target.SetBalances(balances)

// Create a simple diff for fuzzing - we'll just use raw bytes
_, err := diffToBalances(source, target)
if err == nil {
// Add some realistic balance diff bytes for the corpus
f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple balance diff
}
}

// Add edge cases
f.Add([]byte{})
f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})

f.Fuzz(func(t *testing.T, data []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("newBalancesDiff panicked: %v", r)
}
}()

_, err := newBalancesDiff(data)
_ = err
})
}

// FuzzApplyDiff tests applying variations of valid diffs
func FuzzApplyDiff(f *testing.F) {
// Test with realistic state variations, not random data
ctx := context.Background()

// Add seed corpus with various valid scenarios
sizes := []uint64{8, 16, 32, 64}
for _, size := range sizes {
source, _ := util.DeterministicGenesisStateElectra(f, size)
target := source.Copy()

// Different types of realistic changes
scenarios := []func(){
func() { _ = target.SetSlot(source.Slot() + 1) }, // Slot change
func() { // Balance change
balances := target.Balances()
if len(balances) > 0 {
balances[0] += 1000000000 // 1 ETH
_ = target.SetBalances(balances)
}
},
func() { // Validator change
validators := target.Validators()
if len(validators) > 0 {
validators[0].EffectiveBalance += 1000000000
_ = target.SetValidators(validators)
}
},
}

for _, scenario := range scenarios {
testTarget := source.Copy()
scenario()

validDiff, err := Diff(source, testTarget)
if err == nil {
f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
}
}
}

f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
// Only test with reasonable sized inputs
if len(stateDiff) > 10000 || len(validatorDiffs) > 10000 || len(balancesDiff) > 10000 {
return
}

// Create fresh source state for each test
source, _ := util.DeterministicGenesisStateElectra(t, 8)

diff := HdiffBytes{
StateDiff: stateDiff,
ValidatorDiffs: validatorDiffs,
BalancesDiff: balancesDiff,
}

// Apply diff - errors are expected for fuzzed data
_, err := ApplyDiff(ctx, source, diff)
_ = err // Expected to fail with invalid data
})
}

// FuzzReadPendingAttestation tests the pending attestation deserialization
func FuzzReadPendingAttestation(f *testing.F) {
// Add edge cases - this function is particularly vulnerable
f.Add([]byte{})
f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}) // 8 bytes
f.Add(make([]byte, 200)) // Larger than expected

// Add a case with large reported length
largeLength := make([]byte, 8)
binary.LittleEndian.PutUint64(largeLength, 0xFFFFFFFF) // Large bits length
f.Add(largeLength)

f.Fuzz(func(t *testing.T, data []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("readPendingAttestation panicked: %v", r)
}
}()

// Make a copy since the function modifies the slice
dataCopy := make([]byte, len(data))
copy(dataCopy, data)

_, err := readPendingAttestation(&dataCopy)
_ = err
})
}

// FuzzKmpIndex tests the KMP algorithm implementation
func FuzzKmpIndex(f *testing.F) {
// Test with integer pointers to match the actual usage
f.Add(0, "1,2,3", "1,2,3,4,5")
f.Add(3, "1,2,3", "1,2,3,1,2,3")
f.Add(0, "", "1,2,3")

f.Fuzz(func(t *testing.T, lens int, patternStr string, textStr string) {
defer func() {
if r := recover(); r != nil {
t.Errorf("kmpIndex panicked: %v", r)
}
}()

// Parse comma-separated strings into int slices
var pattern, text []int
if patternStr != "" {
for _, s := range strings.Split(patternStr, ",") {
if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
pattern = append(pattern, val)
}
}
}
if textStr != "" {
for _, s := range strings.Split(textStr, ",") {
if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
text = append(text, val)
}
}
}

// Convert to pointer slices as used in actual code
patternPtrs := make([]*int, len(pattern))
for i := range pattern {
val := pattern[i]
patternPtrs[i] = &val
}

textPtrs := make([]*int, len(text))
for i := range text {
val := text[i]
textPtrs[i] = &val
}

integerEquals := func(a, b *int) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return *a == *b
}

// Clamp lens to reasonable range to avoid infinite loops
if lens < 0 {
lens = 0
}
if lens > len(textPtrs) {
lens = len(textPtrs)
}

result := kmpIndex(lens, textPtrs, integerEquals)

// Basic sanity check
if result < 0 || result > lens {
t.Errorf("kmpIndex returned invalid result: %d for lens=%d", result, lens)
}
})
}

// FuzzComputeLPS tests the LPS computation for KMP
func FuzzComputeLPS(f *testing.F) {
// Add seed cases
f.Add("1,2,1")
f.Add("1,1,1")
f.Add("1,2,3,4")
f.Add("")

f.Fuzz(func(t *testing.T, patternStr string) {
defer func() {
if r := recover(); r != nil {
t.Errorf("computeLPS panicked: %v", r)
}
}()

// Parse comma-separated string into int slice
var pattern []int
if patternStr != "" {
for _, s := range strings.Split(patternStr, ",") {
if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
pattern = append(pattern, val)
}
}
}

// Convert to pointer slice
patternPtrs := make([]*int, len(pattern))
for i := range pattern {
val := pattern[i]
patternPtrs[i] = &val
}

integerEquals := func(a, b *int) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return *a == *b
}

result := computeLPS(patternPtrs, integerEquals)

// Verify result length matches input
if len(result) != len(pattern) {
t.Errorf("computeLPS returned wrong length: got %d, expected %d", len(result), len(pattern))
}

// Verify all LPS values are non-negative and within bounds
for i, lps := range result {
if lps < 0 || lps > i {
t.Errorf("Invalid LPS value at index %d: %d", i, lps)
}
}
})
}

// FuzzDiffToBalances tests balance diff computation
func FuzzDiffToBalances(f *testing.F) {
f.Fuzz(func(t *testing.T, sourceData, targetData []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("diffToBalances panicked: %v", r)
}
}()

// Convert byte data to balance arrays
var sourceBalances, targetBalances []uint64

// Parse source balances (8 bytes per uint64)
for i := 0; i+7 < len(sourceData) && len(sourceBalances) < 100; i += 8 {
balance := binary.LittleEndian.Uint64(sourceData[i : i+8])
sourceBalances = append(sourceBalances, balance)
}

// Parse target balances
for i := 0; i+7 < len(targetData) && len(targetBalances) < 100; i += 8 {
balance := binary.LittleEndian.Uint64(targetData[i : i+8])
targetBalances = append(targetBalances, balance)
}

// Create states with the provided balances
source, _ := util.DeterministicGenesisStateElectra(t, 1)
target, _ := util.DeterministicGenesisStateElectra(t, 1)

if len(sourceBalances) > 0 {
_ = source.SetBalances(sourceBalances)
}
if len(targetBalances) > 0 {
_ = target.SetBalances(targetBalances)
}

result, err := diffToBalances(source, target)

// If no error, verify result consistency
if err == nil && len(result) > 0 {
// Result length should match target length
if len(result) != len(target.Balances()) {
t.Errorf("diffToBalances result length mismatch: got %d, expected %d",
len(result), len(target.Balances()))
}
}
})
}

// FuzzValidatorsEqual tests validator comparison
func FuzzValidatorsEqual(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
defer func() {
if r := recover(); r != nil {
t.Errorf("validatorsEqual panicked: %v", r)
}
}()

// Create two validators and fuzz their fields
if len(data) < 16 {
return
}

source, _ := util.DeterministicGenesisStateElectra(t, 2)
validators := source.Validators()
if len(validators) < 2 {
return
}

val1 := validators[0]
val2 := validators[1]

// Modify validator fields based on fuzz data
if len(data) > 0 && data[0]%2 == 0 {
val2.EffectiveBalance = val1.EffectiveBalance + uint64(data[0])
}
if len(data) > 1 && data[1]%2 == 0 {
val2.Slashed = !val1.Slashed
}

// Create ReadOnlyValidator wrappers if needed
// Since validatorsEqual expects ReadOnlyValidator interface,
// we'll skip this test for now as it requires state wrapper implementation
_ = val1
_ = val2
})
}
390 consensus-types/hdiff/property_test.go Normal file
@@ -0,0 +1,390 @@
package hdiff

import (
"encoding/binary"
"math"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

// FuzzPropertyRoundTrip verifies that applying a diff to the source state reproduces the target state with realistic data
func FuzzPropertyRoundTrip(f *testing.F) {
f.Fuzz(func(t *testing.T, slotDelta uint64, balanceData []byte, validatorData []byte) {
// Limit to realistic ranges
if slotDelta > 32 { // Max one epoch
slotDelta = slotDelta % 32
}

// Convert byte data to realistic deltas and changes
var balanceDeltas []int64
var validatorChanges []bool

// Parse balance deltas - limit to realistic amounts (8 bytes per int64)
for i := 0; i+7 < len(balanceData) && len(balanceDeltas) < 20; i += 8 {
delta := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
// Keep deltas realistic (max 10 ETH change)
if delta > 10000000000 {
delta = delta % 10000000000
}
if delta < -10000000000 {
delta = -((-delta) % 10000000000)
}
balanceDeltas = append(balanceDeltas, delta)
}

// Parse validator changes (1 byte per bool) - limit to small number
for i := 0; i < len(validatorData) && len(validatorChanges) < 10; i++ {
validatorChanges = append(validatorChanges, validatorData[i]%2 == 0)
}

ctx := t.Context()

// Create source state with reasonable size
validatorCount := uint64(len(validatorChanges) + 8) // Minimum 8 validators
if validatorCount > 64 {
validatorCount = 64 // Cap at 64 for performance
}
source, _ := util.DeterministicGenesisStateElectra(t, validatorCount)

// Create target state with modifications
target := source.Copy()

// Apply slot change
_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

// Apply realistic balance changes
if len(balanceDeltas) > 0 {
balances := target.Balances()
for i, delta := range balanceDeltas {
if i >= len(balances) {
break
}
// Apply realistic balance changes with safe bounds
if delta < 0 {
if uint64(-delta) > balances[i] {
balances[i] = 0 // Can't go below 0
} else {
balances[i] -= uint64(-delta)
}
} else {
// Cap at reasonable maximum (1000 ETH)
maxBalance := uint64(1000000000000) // 1000 ETH in Gwei
if balances[i]+uint64(delta) > maxBalance {
balances[i] = maxBalance
} else {
balances[i] += uint64(delta)
}
}
}
_ = target.SetBalances(balances)
}

// Apply realistic validator changes
if len(validatorChanges) > 0 {
validators := target.Validators()
for i, shouldChange := range validatorChanges {
if i >= len(validators) {
break
}
if shouldChange {
// Make realistic changes - small effective balance adjustments
validators[i].EffectiveBalance += 1000000000 // 1 ETH
}
}
_ = target.SetValidators(validators)
}

// Create diff
diff, err := Diff(source, target)
if err != nil {
// If diff creation fails, that's acceptable for malformed inputs
return
}

// Apply diff
result, err := ApplyDiff(ctx, source, diff)
if err != nil {
// If diff application fails, that's acceptable
return
}

// Verify round-trip property: source + diff = target
require.Equal(t, target.Slot(), result.Slot())

// Verify balance consistency
targetBalances := target.Balances()
resultBalances := result.Balances()
require.Equal(t, len(targetBalances), len(resultBalances))
for i := range targetBalances {
require.Equal(t, targetBalances[i], resultBalances[i], "Balance mismatch at index %d", i)
}

// Verify validator consistency
targetVals := target.Validators()
resultVals := result.Validators()
require.Equal(t, len(targetVals), len(resultVals))
for i := range targetVals {
require.Equal(t, targetVals[i].Slashed, resultVals[i].Slashed, "Validator slashing mismatch at index %d", i)
require.Equal(t, targetVals[i].EffectiveBalance, resultVals[i].EffectiveBalance, "Validator balance mismatch at index %d", i)
}
})
}

// FuzzPropertyResourceBounds verifies operations complete quickly with realistic data
func FuzzPropertyResourceBounds(f *testing.F) {
f.Fuzz(func(t *testing.T, validatorCount uint8, slotDelta uint8, changeCount uint8) {
// Use realistic parameters
validators := uint64(validatorCount%64 + 8) // 8-71 validators
slots := uint64(slotDelta % 32) // 0-31 slots
changes := int(changeCount % 10) // 0-9 changes

// Create realistic states
source, _ := util.DeterministicGenesisStateElectra(t, validators)
target := source.Copy()

// Apply realistic changes
_ = target.SetSlot(source.Slot() + primitives.Slot(slots))

if changes > 0 {
validatorList := target.Validators()
for i := 0; i < changes && i < len(validatorList); i++ {
validatorList[i].EffectiveBalance += 1000000000 // 1 ETH
}
_ = target.SetValidators(validatorList)
}

// Operations should complete quickly
start := time.Now()
diff, err := Diff(source, target)
duration := time.Since(start)

if err == nil {
// Should be fast
require.Equal(t, true, duration < time.Second, "Diff creation too slow: %v", duration)

// Apply should also be fast
start = time.Now()
_, err = ApplyDiff(t.Context(), source, diff)
duration = time.Since(start)

if err == nil {
require.Equal(t, true, duration < time.Second, "Diff application too slow: %v", duration)
}
}
})
}

// FuzzPropertyDiffEfficiency verifies that diffs are smaller than full states for typical cases
func FuzzPropertyDiffEfficiency(f *testing.F) {
f.Fuzz(func(t *testing.T, slotDelta uint64, numChanges uint8) {
if slotDelta > 100 {
slotDelta = slotDelta % 100
}
if numChanges > 10 {
numChanges = numChanges % 10
}

// Create states with small differences
source, _ := util.DeterministicGenesisStateElectra(t, 64)
target := source.Copy()

_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

// Make a few small changes
if numChanges > 0 {
validators := target.Validators()
for i := uint8(0); i < numChanges && int(i) < len(validators); i++ {
validators[i].EffectiveBalance += 1000
}
_ = target.SetValidators(validators)
}

// Create diff
diff, err := Diff(source, target)
if err != nil {
return
}

// For small changes, diff should be much smaller than full state
sourceSSZ, err := source.MarshalSSZ()
if err != nil {
return
}

diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)

// Diff should be smaller than full state for small changes
if numChanges <= 5 && slotDelta <= 10 {
require.Equal(t, true, diffSize < len(sourceSSZ)/2,
"Diff size %d should be less than half of state size %d", diffSize, len(sourceSSZ))
}
})
}

// FuzzPropertyBalanceConservation verifies that balance operations don't create or destroy value unexpectedly
func FuzzPropertyBalanceConservation(f *testing.F) {
f.Fuzz(func(t *testing.T, balanceData []byte) {
// Convert byte data to balance changes
var balanceChanges []int64
for i := 0; i+7 < len(balanceData) && len(balanceChanges) < 50; i += 8 {
change := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
balanceChanges = append(balanceChanges, change)
}

source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(balanceChanges)+10))
originalBalances := source.Balances()

// Calculate total before
var totalBefore uint64
for _, balance := range originalBalances {
totalBefore += balance
}

// Apply balance changes via diff system
target := source.Copy()
targetBalances := target.Balances()

var totalDelta int64
for i, delta := range balanceChanges {
if i >= len(targetBalances) {
break
}

// Prevent underflow
if delta < 0 && uint64(-delta) > targetBalances[i] {
totalDelta -= int64(targetBalances[i]) // Lost amount
targetBalances[i] = 0
} else if delta < 0 {
targetBalances[i] -= uint64(-delta)
totalDelta += delta
} else {
// Prevent overflow
if uint64(delta) > math.MaxUint64-targetBalances[i] {
gained := math.MaxUint64 - targetBalances[i]
totalDelta += int64(gained)
targetBalances[i] = math.MaxUint64
} else {
targetBalances[i] += uint64(delta)
totalDelta += delta
}
}
}
_ = target.SetBalances(targetBalances)

// Apply through diff system
diff, err := Diff(source, target)
if err != nil {
return
}

result, err := ApplyDiff(t.Context(), source, diff)
if err != nil {
return
}

// Calculate total after
resultBalances := result.Balances()
var totalAfter uint64
for _, balance := range resultBalances {
totalAfter += balance
}

// Verify conservation (accounting for intended changes)
expectedTotal := totalBefore
if totalDelta >= 0 {
expectedTotal += uint64(totalDelta)
} else {
if uint64(-totalDelta) <= expectedTotal {
expectedTotal -= uint64(-totalDelta)
} else {
expectedTotal = 0
}
}

require.Equal(t, expectedTotal, totalAfter,
"Balance conservation violated: before=%d, delta=%d, expected=%d, actual=%d",
totalBefore, totalDelta, expectedTotal, totalAfter)
})
}

// FuzzPropertyMonotonicSlot verifies the slot only increases
func FuzzPropertyMonotonicSlot(f *testing.F) {
f.Fuzz(func(t *testing.T, slotDelta uint64) {
source, _ := util.DeterministicGenesisStateElectra(t, 16)
target := source.Copy()

targetSlot := source.Slot() + primitives.Slot(slotDelta)
_ = target.SetSlot(targetSlot)

diff, err := Diff(source, target)
if err != nil {
return
}

result, err := ApplyDiff(t.Context(), source, diff)
if err != nil {
return
}

// Slot should never decrease
require.Equal(t, true, result.Slot() >= source.Slot(),
"Slot decreased from %d to %d", source.Slot(), result.Slot())

// Slot should match target
require.Equal(t, targetSlot, result.Slot())
})
}

// FuzzPropertyValidatorIndices verifies validator indices remain consistent
func FuzzPropertyValidatorIndices(f *testing.F) {
f.Fuzz(func(t *testing.T, changeData []byte) {
// Convert byte data to boolean changes
var changes []bool
for i := 0; i < len(changeData) && len(changes) < 20; i++ {
changes = append(changes, changeData[i]%2 == 0)
}

source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(changes)+5))
target := source.Copy()

// Apply changes
validators := target.Validators()
for i, shouldChange := range changes {
if i >= len(validators) {
break
}
if shouldChange {
validators[i].EffectiveBalance += 1000
}
}
_ = target.SetValidators(validators)

diff, err := Diff(source, target)
if err != nil {
return
}

result, err := ApplyDiff(t.Context(), source, diff)
if err != nil {
return
}

// Validator count should not decrease
require.Equal(t, true, len(result.Validators()) >= len(source.Validators()),
"Validator count decreased from %d to %d", len(source.Validators()), len(result.Validators()))

// Public keys should be preserved for existing validators
sourceVals := source.Validators()
resultVals := result.Validators()
for i := range sourceVals {
if i < len(resultVals) {
require.Equal(t, sourceVals[i].PublicKey, resultVals[i].PublicKey,
"Public key changed at validator index %d", i)
}
}
})
}
392 consensus-types/hdiff/security_test.go Normal file
@@ -0,0 +1,392 @@
package hdiff

import (
"fmt"
"sync"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

// TestIntegerOverflowProtection tests protection against balance overflow attacks
func TestIntegerOverflowProtection(t *testing.T) {
source, _ := util.DeterministicGenesisStateElectra(t, 8)

// Test balance overflow in diffToBalances - use realistic values
t.Run("balance_diff_overflow", func(t *testing.T) {
target := source.Copy()
balances := target.Balances()

// Set high but realistic balance values (32 ETH in Gwei = 32e9)
balances[0] = 32000000000 // 32 ETH
balances[1] = 64000000000 // 64 ETH
_ = target.SetBalances(balances)

// This should work fine with realistic values
diffs, err := diffToBalances(source, target)
require.NoError(t, err)

// Verify the diffs are reasonable
require.Equal(t, true, len(diffs) > 0, "Should have balance diffs")
})

// Test reasonable balance changes
t.Run("realistic_balance_changes", func(t *testing.T) {
// Create realistic balance changes (slashing, rewards)
balancesDiff := []int64{1000000000, -500000000, 2000000000} // 1 ETH gain, 0.5 ETH loss, 2 ETH gain

// Apply to state with normal balances
testSource := source.Copy()
normalBalances := []uint64{32000000000, 32000000000, 32000000000} // 32 ETH each
_ = testSource.SetBalances(normalBalances)

// This should work fine
result, err := applyBalancesDiff(testSource, balancesDiff)
require.NoError(t, err)

resultBalances := result.Balances()
require.Equal(t, uint64(33000000000), resultBalances[0]) // 33 ETH
require.Equal(t, uint64(31500000000), resultBalances[1]) // 31.5 ETH
require.Equal(t, uint64(34000000000), resultBalances[2]) // 34 ETH
})
}

// TestReasonablePerformance tests that operations complete in reasonable time
func TestReasonablePerformance(t *testing.T) {
t.Run("large_state_performance", func(t *testing.T) {
// Test with a large but realistic validator set
source, _ := util.DeterministicGenesisStateElectra(t, 1000) // 1000 validators
target := source.Copy()

// Make realistic changes
_ = target.SetSlot(source.Slot() + 32) // One epoch
validators := target.Validators()
for i := 0; i < 100; i++ { // 10% of validators changed
validators[i].EffectiveBalance += 1000000000 // 1 ETH change
}
_ = target.SetValidators(validators)

// Should complete quickly
start := time.Now()
diff, err := Diff(source, target)
duration := time.Since(start)

require.NoError(t, err)
require.Equal(t, true, duration < time.Second, "Diff creation took too long: %v", duration)
require.Equal(t, true, len(diff.StateDiff) > 0, "Should have state diff")
})

t.Run("realistic_diff_application", func(t *testing.T) {
// Test applying diffs to large states
source, _ := util.DeterministicGenesisStateElectra(t, 500)
target := source.Copy()
_ = target.SetSlot(source.Slot() + 1)

// Create and apply diff
diff, err := Diff(source, target)
require.NoError(t, err)

start := time.Now()
result, err := ApplyDiff(t.Context(), source, diff)
duration := time.Since(start)

require.NoError(t, err)
require.Equal(t, target.Slot(), result.Slot())
require.Equal(t, true, duration < time.Second, "Diff application took too long: %v", duration)
})
}

// TestStateTransitionValidation tests realistic state transition scenarios
func TestStateTransitionValidation(t *testing.T) {
t.Run("validator_slashing_scenario", func(t *testing.T) {
source, _ := util.DeterministicGenesisStateElectra(t, 10)
target := source.Copy()

// Simulate validator slashing (realistic scenario)
validators := target.Validators()
validators[0].Slashed = true
validators[0].EffectiveBalance = 0 // Slashed validator loses balance
_ = target.SetValidators(validators)

// This should work fine
diff, err := Diff(source, target)
require.NoError(t, err)

result, err := ApplyDiff(t.Context(), source, diff)
require.NoError(t, err)
require.Equal(t, true, result.Validators()[0].Slashed)
require.Equal(t, uint64(0), result.Validators()[0].EffectiveBalance)
})

t.Run("epoch_transition_scenario", func(t *testing.T) {
source, _ := util.DeterministicGenesisStateElectra(t, 64)
target := source.Copy()

// Simulate epoch transition with multiple changes
_ = target.SetSlot(source.Slot() + 32) // One epoch

// Some validators get rewards, others get penalties
balances := target.Balances()
for i := 0; i < len(balances); i++ {
if i%2 == 0 {
balances[i] += 100000000 // 0.1 ETH reward
} else {
if balances[i] > 50000000 {
balances[i] -= 50000000 // 0.05 ETH penalty
}
}
}
_ = target.SetBalances(balances)

// This should work smoothly
diff, err := Diff(source, target)
require.NoError(t, err)

result, err := ApplyDiff(t.Context(), source, diff)
require.NoError(t, err)
require.Equal(t, target.Slot(), result.Slot())
})

t.Run("consistent_state_root", func(t *testing.T) {
// Test that diffs preserve state consistency
source, _ := util.DeterministicGenesisStateElectra(t, 32)
target := source.Copy()

// Make minimal changes
_ = target.SetSlot(source.Slot() + 1)

// Diff and apply should be consistent
diff, err := Diff(source, target)
require.NoError(t, err)

result, err := ApplyDiff(t.Context(), source, diff)
require.NoError(t, err)

// Result should match target
require.Equal(t, target.Slot(), result.Slot())
require.Equal(t, len(target.Validators()), len(result.Validators()))
require.Equal(t, len(target.Balances()), len(result.Balances()))
})
}

// TestSerializationRoundTrip tests serialization consistency
func TestSerializationRoundTrip(t *testing.T) {
t.Run("diff_serialization_consistency", func(t *testing.T) {
// Test that serialization and deserialization are consistent
source, _ := util.DeterministicGenesisStateElectra(t, 16)
target := source.Copy()

// Make changes
_ = target.SetSlot(source.Slot() + 5)
validators := target.Validators()
validators[0].EffectiveBalance += 1000000000
_ = target.SetValidators(validators)

// Create diff
diff1, err := Diff(source, target)
require.NoError(t, err)

// Deserialize and re-serialize
hdiff, err := newHdiff(diff1)
require.NoError(t, err)

diff2 := hdiff.serialize()

// Apply both diffs - should get same result
result1, err := ApplyDiff(t.Context(), source, diff1)
require.NoError(t, err)

result2, err := ApplyDiff(t.Context(), source, diff2)
require.NoError(t, err)

require.Equal(t, result1.Slot(), result2.Slot())
require.Equal(t, result1.Validators()[0].EffectiveBalance, result2.Validators()[0].EffectiveBalance)
})

t.Run("empty_diff_handling", func(t *testing.T) {
// Test that empty diffs are handled correctly
source, _ := util.DeterministicGenesisStateElectra(t, 8)
target := source.Copy() // No changes

// Should create minimal diff
diff, err := Diff(source, target)
require.NoError(t, err)

// Apply should work and return equivalent state
result, err := ApplyDiff(t.Context(), source, diff)
require.NoError(t, err)

require.Equal(t, source.Slot(), result.Slot())
require.Equal(t, len(source.Validators()), len(result.Validators()))
})

t.Run("compression_efficiency", func(t *testing.T) {
// Test that compression is working effectively
source, _ := util.DeterministicGenesisStateElectra(t, 100)
target := source.Copy()

// Make small changes
_ = target.SetSlot(source.Slot() + 1)
validators := target.Validators()
validators[0].EffectiveBalance += 1000000000
_ = target.SetValidators(validators)

// Create diff
diff, err := Diff(source, target)
require.NoError(t, err)

// Get full state size
fullStateSSZ, err := target.MarshalSSZ()
require.NoError(t, err)

// Diff should be much smaller than full state
diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)
require.Equal(t, true, diffSize < len(fullStateSSZ)/2,
"Diff should be smaller than full state: diff=%d, full=%d", diffSize, len(fullStateSSZ))
})
}

// TestKMPSecurity tests the KMP algorithm for security issues
func TestKMPSecurity(t *testing.T) {
t.Run("nil_pointer_handling", func(t *testing.T) {
// Test with nil pointers in the pattern/text
pattern := []*int{nil, nil, nil}
text := []*int{nil, nil, nil, nil, nil}

equals := func(a, b *int) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return *a == *b
}

// Should not panic - result can be any integer
result := kmpIndex(len(pattern), text, equals)
_ = result // Any result is valid, just ensure no panic
})

t.Run("empty_pattern_edge_case", func(t *testing.T) {
var pattern []*int
text := []*int{new(int), new(int)}

equals := func(a, b *int) bool { return a == b }

result := kmpIndex(0, text, equals)
require.Equal(t, 0, result, "Empty pattern should return 0")
_ = pattern // Silence unused variable warning
})

t.Run("realistic_pattern_performance", func(t *testing.T) {
// Test with realistic sizes to ensure good performance
realisticSize := 100 // More realistic for validator arrays
pattern := make([]*int, realisticSize)
text := make([]*int, realisticSize*2)

// Create realistic pattern
for i := range pattern {
val := i % 10 // More variation
pattern[i] = &val
}
for i := range text {
val := i % 10
text[i] = &val
}

equals := func(a, b *int) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return *a == *b
}

start := time.Now()
result := kmpIndex(len(pattern), text, equals)
duration := time.Since(start)

// Should complete quickly with realistic inputs
require.Equal(t, true, duration < time.Second,
"KMP took too long: %v", duration)
_ = result // Any result is valid, just ensure performance is good
})
}

// TestConcurrencySafety tests thread safety of the hdiff operations
func TestConcurrencySafety(t *testing.T) {
t.Run("concurrent_diff_creation", func(t *testing.T) {
source, _ := util.DeterministicGenesisStateElectra(t, 32)
target := source.Copy()
_ = target.SetSlot(source.Slot() + 1)

const numGoroutines = 10
const iterations = 100

var wg sync.WaitGroup
errors := make(chan error, numGoroutines*iterations)

for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()

for j := 0; j < iterations; j++ {
_, err := Diff(source, target)
if err != nil {
errors <- fmt.Errorf("worker %d iteration %d: %v", workerID, j, err)
}
}
}(i)
}

wg.Wait()
close(errors)

// Check for any errors
for err := range errors {
t.Error(err)
}
})

t.Run("concurrent_diff_application", func(t *testing.T) {
ctx := t.Context()
source, _ := util.DeterministicGenesisStateElectra(t, 16)
target := source.Copy()
_ = target.SetSlot(source.Slot() + 5)

diff, err := Diff(source, target)
require.NoError(t, err)

const numGoroutines = 10
var wg sync.WaitGroup
errors := make(chan error, numGoroutines)

for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()

// Each goroutine needs its own copy of the source state
localSource := source.Copy()
_, err := ApplyDiff(ctx, localSource, diff)
if err != nil {
errors <- fmt.Errorf("worker %d: %v", workerID, err)
}
}(i)
}

wg.Wait()
close(errors)

// Check for any errors
for err := range errors {
t.Error(err)
}
})
}
2101 consensus-types/hdiff/state_diff.go Normal file
File diff suppressed because it is too large
1219 consensus-types/hdiff/state_diff_test.go Normal file
File diff suppressed because it is too large
4 consensus-types/hdiff/testdata/fuzz/FuzzKmpIndex/055799fc9491fac6 vendored Normal file
@@ -0,0 +1,4 @@
go test fuzz v1
int(1)
string("0")
string("1,2,1,2")
2 consensus-types/hdiff/testdata/fuzz/FuzzNewBalancesDiff/df3a74c14c223270 vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("\xfc\xee\xec\xf6\x0e\xb25\x1b}")
4 consensus-types/hdiff/testdata/fuzz/FuzzNewHdiff/2ddba0f892c3b237 vendored Normal file
@@ -0,0 +1,4 @@
go test fuzz v1
[]byte("\x8c\x8c\x8c\x8c\x8c\x00\x01\x00\x00")
[]byte("p")
[]byte("0")
4 consensus-types/hdiff/testdata/fuzz/FuzzNewHdiff/8b6960781af86b7a vendored Normal file
@@ -0,0 +1,4 @@
go test fuzz v1
[]byte("\x8c\x8c\x8c\x8c\x8c\x00\x01\x00\x00")
[]byte("0")
[]byte("0")
2 consensus-types/hdiff/testdata/fuzz/FuzzNewStateDiff/35fd79ed1cb8d258 vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("\xff\xff\xff\x7f")
2 consensus-types/hdiff/testdata/fuzz/FuzzNewStateDiff/cbcd9abbac2c5e6d vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("\xed\xed\xed\xed\f\xee\xed\xedSI")
2 consensus-types/hdiff/testdata/fuzz/FuzzNewValidatorDiffs/b58b84143dad8cb3 vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("\t\x00\xbd\x1c\f\xcb\x00 \x00\x00\x00\x00")
2 consensus-types/hdiff/testdata/fuzz/FuzzPropertyBalanceConservation/a855d24c40cdafa3 vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("0000000\xad")
2 consensus-types/hdiff/testdata/fuzz/FuzzPropertyValidatorIndices/582528ddfad69eb5 vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("0")
2 consensus-types/hdiff/testdata/fuzz/FuzzReadPendingAttestation/a40f5c684fca518d vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("0000000\xff")
9 consensus-types/helpers/BUILD.bazel Normal file
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["comparisons.go"],
importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/helpers",
visibility = ["//visibility:public"],
deps = ["//proto/prysm/v1alpha1:go_default_library"],
)
109 consensus-types/helpers/comparisons.go Normal file
@@ -0,0 +1,109 @@
package helpers

import (
"bytes"

ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func ForksEqual(s, t *ethpb.Fork) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
if s.Epoch != t.Epoch {
return false
}
if !bytes.Equal(s.PreviousVersion, t.PreviousVersion) {
return false
}
return bytes.Equal(s.CurrentVersion, t.CurrentVersion)
}

func BlockHeadersEqual(s, t *ethpb.BeaconBlockHeader) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
if s.Slot != t.Slot {
return false
}
if s.ProposerIndex != t.ProposerIndex {
return false
}
if !bytes.Equal(s.ParentRoot, t.ParentRoot) {
return false
}
if !bytes.Equal(s.StateRoot, t.StateRoot) {
return false
}
return bytes.Equal(s.BodyRoot, t.BodyRoot)
}

func Eth1DataEqual(s, t *ethpb.Eth1Data) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
if !bytes.Equal(s.DepositRoot, t.DepositRoot) {
return false
}
if s.DepositCount != t.DepositCount {
return false
}
return bytes.Equal(s.BlockHash, t.BlockHash)
}

func PendingDepositsEqual(s, t *ethpb.PendingDeposit) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
if !bytes.Equal(s.PublicKey, t.PublicKey) {
return false
}
if !bytes.Equal(s.WithdrawalCredentials, t.WithdrawalCredentials) {
return false
}
if s.Amount != t.Amount {
return false
}
if !bytes.Equal(s.Signature, t.Signature) {
return false
}
return s.Slot == t.Slot
}

func PendingPartialWithdrawalsEqual(s, t *ethpb.PendingPartialWithdrawal) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
if s.Index != t.Index {
return false
}
if s.Amount != t.Amount {
return false
}
return s.WithdrawableEpoch == t.WithdrawableEpoch
}

func PendingConsolidationsEqual(s, t *ethpb.PendingConsolidation) bool {
if s == nil && t == nil {
return true
}
if s == nil || t == nil {
return false
}
return s.SourceIndex == t.SourceIndex && s.TargetIndex == t.TargetIndex
}
@@ -320,6 +320,6 @@ func IsProto(item interface{}) bool {
return ok
}
elemTyp := typ.Elem()
modelType := reflect.TypeOf((*proto.Message)(nil)).Elem()
modelType := reflect.TypeFor[proto.Message]()
return elemTyp.Implements(modelType)
}

@@ -60,9 +60,21 @@ func PopulateVariableLengthInfo(sszInfo *sszInfo, value any) error {
if val.Kind() != reflect.Slice {
return fmt.Errorf("expected slice for List type, got %v", val.Kind())
}
length := val.Len()

length := uint64(val.Len())
if err := listInfo.SetLength(length); err != nil {
if listInfo.element.isVariable {
listInfo.elementSizes = make([]uint64, 0, length)

// Populate nested variable-sized type element lengths recursively.
for i := range length {
if err := PopulateVariableLengthInfo(listInfo.element, val.Index(i).Interface()); err != nil {
return fmt.Errorf("could not populate nested list element at index %d: %w", i, err)
}
listInfo.elementSizes = append(listInfo.elementSizes, listInfo.element.Size())
}
}

if err := listInfo.SetLength(uint64(length)); err != nil {
return fmt.Errorf("could not set list length: %w", err)
}

@@ -111,15 +123,20 @@ func PopulateVariableLengthInfo(sszInfo *sszInfo, value any) error {
continue
}

// Set the actual offset for variable-sized fields.
fieldInfo.offset = currentOffset

// Recursively populate variable-sized fields.
fieldValue := derefValue.FieldByName(fieldInfo.goFieldName)
if err := PopulateVariableLengthInfo(childSszInfo, fieldValue.Interface()); err != nil {
return fmt.Errorf("could not populate from value for field %s: %w", fieldName, err)
}

// Each variable-sized element needs an offset entry.
if childSszInfo.sszType == List {
currentOffset += childSszInfo.listInfo.OffsetBytes()
}

// Set the actual offset for variable-sized fields.
fieldInfo.offset = currentOffset

currentOffset += childSszInfo.Size()
}

@@ -16,6 +16,8 @@ type listInfo struct {
element *sszInfo
// length is the actual number of elements at runtime (0 if not set).
length uint64
// elementSizes caches each element's byte size for variable-sized type elements
elementSizes []uint64
}

func (l *listInfo) Limit() uint64 {
@@ -51,3 +53,35 @@ func (l *listInfo) SetLength(length uint64) error {
l.length = length
return nil
}

func (l *listInfo) Size() uint64 {
if l == nil {
return 0
}

// For fixed-sized type elements, size is multiplying length by element size.
if !l.element.isVariable {
return l.length * l.element.Size()
}

// For variable-sized type elements, sum up the sizes of each element.
totalSize := uint64(0)
for _, sz := range l.elementSizes {
totalSize += sz
}
return totalSize
}

// OffsetBytes returns the total number of offset bytes used for the list elements.
// Each variable-sized element uses 4 bytes to store its offset.
func (l *listInfo) OffsetBytes() uint64 {
if l == nil {
return 0
}

if !l.element.isVariable {
return 0
}

return offsetBytes * l.length
}

@@ -133,56 +133,73 @@ func TestCalculateOffsetAndLength(t *testing.T) {
{
name: "field_list_uint64",
path: ".field_list_uint64",
expectedOffset: 108, // First part of variable-sized type.
expectedOffset: 112, // First part of variable-sized type.
expectedLength: 40, // 5 elements * uint64 (8 bytes each)
},
{
name: "field_list_container",
path: ".field_list_container",
expectedOffset: 148, // Second part of variable-sized type.
expectedOffset: 152, // Second part of variable-sized type.
expectedLength: 120, // 3 elements * FixedNestedContainer (40 bytes each)
},
{
name: "field_list_bytes32",
path: ".field_list_bytes32",
expectedOffset: 268,
expectedOffset: 272,
expectedLength: 96, // 3 elements * 32 bytes each
},
// Nested paths
{
name: "nested",
path: ".nested",
expectedOffset: 364,
expectedOffset: 368,
// Calculated with:
// - Value1: 8 bytes
// - field_list_uint64 offset: 4 bytes
// - field_list_uint64 length: 40 bytes
expectedLength: 52,
// - nested_list_field offset: 4 bytes
// - nested_list_field length: 99 bytes
// - 3 offset pointers for each element in nested_list_field: 12 bytes
// Total: 8 + 4 + 40 + 4 + 99 + 12 = 167 bytes
expectedLength: 167,
},
{
name: "nested.value1",
path: ".nested.value1",
expectedOffset: 364,
expectedOffset: 368,
expectedLength: 8,
},
{
name: "nested.field_list_uint64",
path: ".nested.field_list_uint64",
expectedOffset: 376,
expectedOffset: 384,
expectedLength: 40,
},
{
name: "nested.nested_list_field",
path: ".nested.nested_list_field",
expectedOffset: 436,
expectedLength: 99,
},
// Bitlist field
{
name: "bitlist_field",
path: ".bitlist_field",
expectedOffset: 416,
expectedOffset: 535,
expectedLength: 33, // 32 bytes + 1 byte for length delimiter
},
// 2D bytes field
{
name: "nested_list_field",
path: ".nested_list_field",
expectedOffset: 580,
expectedLength: 99,
},
// Fixed trailing field
{
name: "trailing_field",
path: ".trailing_field",
expectedOffset: 52, // After leading_field + 5 offset pointers
expectedOffset: 56, // After leading_field + 6 offset pointers
expectedLength: 56,
},
}
@@ -377,6 +394,15 @@ func createVariableTestContainer() *sszquerypb.VariableTestContainer {
bitlistField.SetBitAt(100, true)
bitlistField.SetBitAt(255, true)

// Total size: 3 lists with lengths 32, 33, and 34 = 99 bytes
nestedListField := make([][]byte, 3)
for i := range nestedListField {
nestedListField[i] = make([]byte, (32 + i)) // Different lengths for each sub-list
for j := range nestedListField[i] {
nestedListField[i][j] = byte(j + i*16)
}
}

return &sszquerypb.VariableTestContainer{
// Fixed leading field
LeadingField: leadingField,
@@ -394,11 +420,15 @@ func createVariableTestContainer() *sszquerypb.VariableTestContainer {
Nested: &sszquerypb.VariableNestedContainer{
Value1: 42,
FieldListUint64: []uint64{1, 2, 3, 4, 5},
NestedListField: nestedListField,
},

// Bitlist field
BitlistField: bitlistField,

// 2D bytes field
NestedListField: nestedListField,

// Fixed trailing field
TrailingField: trailingField,
}
@@ -445,11 +475,20 @@ func getVariableTestContainerSpec() testutil.TestSpec {
Path: ".nested.field_list_uint64",
Expected: testContainer.Nested.FieldListUint64,
},
{
Path: ".nested.nested_list_field",
Expected: testContainer.Nested.NestedListField,
},
// Bitlist field
{
Path: ".bitlist_field",
Expected: testContainer.BitlistField,
},
// 2D bytes field
{
Path: ".nested_list_field",
Expected: testContainer.NestedListField,
},
// Fixed trailing field
{
Path: ".trailing_field",

@@ -54,10 +54,7 @@ func (info *sszInfo) Size() uint64 {

switch info.sszType {
case List:
length := info.listInfo.length
elementSize := info.listInfo.element.Size()

return length * elementSize
return info.listInfo.Size()

case Bitlist:
return info.bitlistInfo.Size()
@@ -69,6 +66,11 @@ func (info *sszInfo) Size() uint64 {
continue
}

// Include offset bytes inside nested lists.
if fieldInfo.sszInfo.sszType == List {
size += fieldInfo.sszInfo.listInfo.OffsetBytes()
}

size += fieldInfo.sszInfo.Size()
}
return size

@@ -31,10 +31,10 @@ func RunStructTest(t *testing.T, spec TestSpec) {
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
require.NoError(t, err)

expectedRawBytes := marshalledData[offset : offset+length]
rawBytes, err := marshalAny(pathTest.Expected)
actualRawBytes := marshalledData[offset : offset+length]
expectedRawBytes, err := marshalAny(pathTest.Expected)
require.NoError(t, err, "Marshalling expected value should not return an error")
require.DeepEqual(t, expectedRawBytes, rawBytes, "Extracted value should match expected")
require.DeepEqual(t, actualRawBytes, expectedRawBytes, "Extracted value should match expected")
})
}
})

58 proto/ssz_query/ssz_query.pb.go generated
@@ -195,6 +195,7 @@ type VariableNestedContainer struct {
state protoimpl.MessageState `protogen:"open.v1"`
Value1 uint64 `protobuf:"varint,1,opt,name=value1,proto3" json:"value1,omitempty"`
FieldListUint64 []uint64 `protobuf:"varint,2,rep,packed,name=field_list_uint64,json=fieldListUint64,proto3" json:"field_list_uint64,omitempty" ssz-max:"100"`
NestedListField [][]byte `protobuf:"bytes,3,rep,name=nested_list_field,json=nestedListField,proto3" json:"nested_list_field,omitempty" ssz-max:"100,50" ssz-size:"?,?"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -243,6 +244,13 @@ func (x *VariableNestedContainer) GetFieldListUint64() []uint64 {
return nil
}

func (x *VariableNestedContainer) GetNestedListField() [][]byte {
if x != nil {
return x.NestedListField
}
return nil
}

type VariableTestContainer struct {
state protoimpl.MessageState `protogen:"open.v1"`
LeadingField []byte `protobuf:"bytes,1,opt,name=leading_field,json=leadingField,proto3" json:"leading_field,omitempty" ssz-size:"32"`
@@ -251,7 +259,8 @@ type VariableTestContainer struct {
FieldListBytes32 [][]byte `protobuf:"bytes,4,rep,name=field_list_bytes32,json=fieldListBytes32,proto3" json:"field_list_bytes32,omitempty" ssz-max:"100" ssz-size:"?,32"`
Nested *VariableNestedContainer `protobuf:"bytes,5,opt,name=nested,proto3" json:"nested,omitempty"`
BitlistField github_com_prysmaticlabs_go_bitfield.Bitlist `protobuf:"bytes,6,opt,name=bitlist_field,json=bitlistField,proto3" json:"bitlist_field,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitlist" ssz-max:"2048"`
TrailingField []byte `protobuf:"bytes,7,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
NestedListField [][]byte `protobuf:"bytes,7,rep,name=nested_list_field,json=nestedListField,proto3" json:"nested_list_field,omitempty" ssz-max:"100,50" ssz-size:"?,?"`
TrailingField []byte `protobuf:"bytes,8,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -328,6 +337,13 @@ func (x *VariableTestContainer) GetBitlistField() github_com_prysmaticlabs_go_bi
return github_com_prysmaticlabs_go_bitfield.Bitlist(nil)
}

func (x *VariableTestContainer) GetNestedListField() [][]byte {
if x != nil {
return x.NestedListField
}
return nil
}

func (x *VariableTestContainer) GetTrailingField() []byte {
if x != nil {
return x.TrailingField
@@ -384,14 +400,18 @@ var file_proto_ssz_query_ssz_query_proto_rawDesc = []byte{
|
||||
0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x35, 0x31, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12,
|
||||
0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c,
|
||||
0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52,
|
||||
0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x66,
|
||||
0x0a, 0x17, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64,
|
||||
0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x31, 0x12, 0x33, 0x0a, 0x11, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f,
|
||||
0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5,
|
||||
0x18, 0x03, 0x31, 0x30, 0x30, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x22, 0xdf, 0x03, 0x0a, 0x15, 0x56, 0x61, 0x72, 0x69, 0x61,
|
||||
0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0xa5,
|
||||
0x01, 0x0a, 0x17, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65,
|
||||
0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x31, 0x12, 0x33, 0x0a, 0x11, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74,
|
||||
0x5f, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92,
|
||||
0xb5, 0x18, 0x03, 0x31, 0x30, 0x30, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x3d, 0x0a, 0x11, 0x6e, 0x65, 0x73, 0x74, 0x65,
|
||||
0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03,
|
||||
0x28, 0x0c, 0x42, 0x11, 0x8a, 0xb5, 0x18, 0x03, 0x3f, 0x2c, 0x3f, 0x92, 0xb5, 0x18, 0x06, 0x31,
|
||||
0x30, 0x30, 0x2c, 0x35, 0x30, 0x52, 0x0f, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x9e, 0x04, 0x0a, 0x15, 0x56, 0x61, 0x72, 0x69, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
|
||||
0x12, 0x2b, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52,
|
||||
@@ -418,14 +438,18 @@ var file_proto_ssz_query_ssz_query_proto_rawDesc = []byte{
|
||||
0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f,
|
||||
0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x6c,
|
||||
0x69, 0x73, 0x74, 0x92, 0xb5, 0x18, 0x04, 0x32, 0x30, 0x34, 0x38, 0x52, 0x0c, 0x62, 0x69, 0x74,
|
||||
0x6c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61,
|
||||
0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c,
|
||||
0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x2f, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x11, 0x6e, 0x65, 0x73,
|
||||
0x74, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x07,
|
||||
0x20, 0x03, 0x28, 0x0c, 0x42, 0x11, 0x8a, 0xb5, 0x18, 0x03, 0x3f, 0x2c, 0x3f, 0x92, 0xb5, 0x18,
|
||||
0x06, 0x31, 0x30, 0x30, 0x2c, 0x35, 0x30, 0x52, 0x0f, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x69,
|
||||
0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
|
||||
0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2f, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -65,6 +65,10 @@ message FixedTestContainer {
|
||||
message VariableNestedContainer {
|
||||
uint64 value1 = 1;
|
||||
repeated uint64 field_list_uint64 = 2 [ (ethereum.eth.ext.ssz_max) = "100" ];
|
||||
repeated bytes nested_list_field = 3 [
|
||||
(ethereum.eth.ext.ssz_size) = "?,?",
|
||||
(ethereum.eth.ext.ssz_max) = "100,50"
|
||||
];
|
||||
}
|
||||
|
||||
// VariableTestContainer - comprehensive variable-size container for SSZ query testing
|
||||
@@ -96,7 +100,14 @@ message VariableTestContainer {
|
||||
"github.com/prysmaticlabs/go-bitfield.Bitlist"
|
||||
];
|
||||
|
||||
// 2D bytes list - test list of bytelists.
|
||||
// e.g., ExecutionPayload.transactions
|
||||
repeated bytes nested_list_field = 7 [
|
||||
(ethereum.eth.ext.ssz_size) = "?,?",
|
||||
(ethereum.eth.ext.ssz_max) = "100,50"
|
||||
];
|
||||
|
||||
// Fixed-size trailing field - test fixed field after variable fields
|
||||
// Verifies correct offset calculation after variable-size fields
|
||||
bytes trailing_field = 7 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: fixed 56-byte field at end, offset: 32 + 4 + 4 + 4 + 4 + 4 = 52
|
||||
bytes trailing_field = 8 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: fixed 56-byte field at end, offset: 32 + 4 + 4 + 4 + 4 + 4 + 4 = 56
|
||||
}
|
||||
|
||||
@@ -324,7 +324,7 @@ func (v *VariableNestedContainer) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the VariableNestedContainer object to a target array
func (v *VariableNestedContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(12)
offset := int(16)

// Field (0) 'Value1'
dst = ssz.MarshalUint64(dst, v.Value1)
@@ -333,6 +333,13 @@ func (v *VariableNestedContainer) MarshalSSZTo(buf []byte) (dst []byte, err erro
dst = ssz.WriteOffset(dst, offset)
offset += len(v.FieldListUint64) * 8

// Offset (2) 'NestedListField'
dst = ssz.WriteOffset(dst, offset)
for ii := 0; ii < len(v.NestedListField); ii++ {
offset += 4
offset += len(v.NestedListField[ii])
}

// Field (1) 'FieldListUint64'
if size := len(v.FieldListUint64); size > 100 {
err = ssz.ErrListTooBigFn("--.FieldListUint64", size, 100)
@@ -342,6 +349,26 @@ func (v *VariableNestedContainer) MarshalSSZTo(buf []byte) (dst []byte, err erro
dst = ssz.MarshalUint64(dst, v.FieldListUint64[ii])
}

// Field (2) 'NestedListField'
if size := len(v.NestedListField); size > 100 {
err = ssz.ErrListTooBigFn("--.NestedListField", size, 100)
return
}
{
offset = 4 * len(v.NestedListField)
for ii := 0; ii < len(v.NestedListField); ii++ {
dst = ssz.WriteOffset(dst, offset)
offset += len(v.NestedListField[ii])
}
}
for ii := 0; ii < len(v.NestedListField); ii++ {
if size := len(v.NestedListField[ii]); size > 50 {
err = ssz.ErrBytesLengthFn("--.NestedListField[ii]", size, 50)
return
}
dst = append(dst, v.NestedListField[ii]...)
}

return
}

@@ -349,12 +376,12 @@ func (v *VariableNestedContainer) MarshalSSZTo(buf []byte) (dst []byte, err erro
func (v *VariableNestedContainer) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 12 {
if size < 16 {
return ssz.ErrSize
}

tail := buf
var o1 uint64
var o1, o2 uint64

// Field (0) 'Value1'
v.Value1 = ssz.UnmarshallUint64(buf[0:8])
@@ -364,13 +391,18 @@ func (v *VariableNestedContainer) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o1 != 12 {
if o1 != 16 {
return ssz.ErrInvalidVariableOffset
}

// Offset (2) 'NestedListField'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}

// Field (1) 'FieldListUint64'
{
buf = tail[o1:]
buf = tail[o1:o2]
num, err := ssz.DivideInt2(len(buf), 8, 100)
if err != nil {
return err
@@ -380,16 +412,45 @@ func (v *VariableNestedContainer) UnmarshalSSZ(buf []byte) error {
v.FieldListUint64[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8])
}
}

// Field (2) 'NestedListField'
{
buf = tail[o2:]
num, err := ssz.DecodeDynamicLength(buf, 100)
if err != nil {
return err
}
v.NestedListField = make([][]byte, num)
err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) {
if len(buf) > 50 {
return ssz.ErrBytesLength
}
if cap(v.NestedListField[indx]) == 0 {
v.NestedListField[indx] = make([]byte, 0, len(buf))
}
v.NestedListField[indx] = append(v.NestedListField[indx], buf...)
return nil
})
if err != nil {
return err
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the VariableNestedContainer object
func (v *VariableNestedContainer) SizeSSZ() (size int) {
size = 12
size = 16

// Field (1) 'FieldListUint64'
size += len(v.FieldListUint64) * 8

// Field (2) 'NestedListField'
for ii := 0; ii < len(v.NestedListField); ii++ {
size += 4
size += len(v.NestedListField[ii])
}

return
}

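As a quick sanity check on the new SizeSSZ above: the fixed part grows from 12 to 16 bytes because the second variable field adds one more 4-byte offset. A minimal stand-alone sketch of the same arithmetic (a local helper, not the generated method):

package example

// sizeSSZ mirrors the generated VariableNestedContainer.SizeSSZ shown above:
// an 8-byte value1 plus two 4-byte offsets give the 16-byte fixed part,
// followed by the variable-length tails.
func sizeSSZ(fieldListUint64 []uint64, nestedListField [][]byte) int {
    size := 16
    size += len(fieldListUint64) * 8
    for _, elem := range nestedListField {
        size += 4 + len(elem) // per-element offset + payload
    }
    return size
}

// Example: sizeSSZ([]uint64{1, 2}, [][]byte{{0xaa}}) == 16 + 16 + 5 == 37.
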
@@ -421,6 +482,29 @@ func (v *VariableNestedContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(100, numItems, 8))
}

// Field (2) 'NestedListField'
{
subIndx := hh.Index()
num := uint64(len(v.NestedListField))
if num > 100 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range v.NestedListField {
{
elemIndx := hh.Index()
byteLen := uint64(len(elem))
if byteLen > 50 {
err = ssz.ErrIncorrectListSize
return
}
hh.AppendBytes32(elem)
hh.MerkleizeWithMixin(elemIndx, byteLen, (50+31)/32)
}
}
hh.MerkleizeWithMixin(subIndx, num, 100)
}

hh.Merkleize(indx)
return
}
@@ -433,7 +517,7 @@ func (v *VariableTestContainer) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the VariableTestContainer object to a target array
func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(108)
offset := int(112)

// Field (0) 'LeadingField'
if size := len(v.LeadingField); size != 32 {
@@ -465,7 +549,14 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
dst = ssz.WriteOffset(dst, offset)
offset += len(v.BitlistField)

// Field (6) 'TrailingField'
// Offset (6) 'NestedListField'
dst = ssz.WriteOffset(dst, offset)
for ii := 0; ii < len(v.NestedListField); ii++ {
offset += 4
offset += len(v.NestedListField[ii])
}

// Field (7) 'TrailingField'
if size := len(v.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return
@@ -517,6 +608,26 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
}
dst = append(dst, v.BitlistField...)

// Field (6) 'NestedListField'
if size := len(v.NestedListField); size > 100 {
err = ssz.ErrListTooBigFn("--.NestedListField", size, 100)
return
}
{
offset = 4 * len(v.NestedListField)
for ii := 0; ii < len(v.NestedListField); ii++ {
dst = ssz.WriteOffset(dst, offset)
offset += len(v.NestedListField[ii])
}
}
for ii := 0; ii < len(v.NestedListField); ii++ {
if size := len(v.NestedListField[ii]); size > 50 {
err = ssz.ErrBytesLengthFn("--.NestedListField[ii]", size, 50)
return
}
dst = append(dst, v.NestedListField[ii]...)
}

return
}

@@ -524,12 +635,12 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 108 {
if size < 112 {
return ssz.ErrSize
}

tail := buf
var o1, o2, o3, o4, o5 uint64
var o1, o2, o3, o4, o5, o6 uint64

// Field (0) 'LeadingField'
if cap(v.LeadingField) == 0 {
@@ -542,7 +653,7 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o1 != 108 {
if o1 != 112 {
return ssz.ErrInvalidVariableOffset
}

@@ -566,11 +677,16 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

// Field (6) 'TrailingField'
if cap(v.TrailingField) == 0 {
v.TrailingField = make([]byte, 0, len(buf[52:108]))
// Offset (6) 'NestedListField'
if o6 = ssz.ReadOffset(buf[52:56]); o6 > size || o5 > o6 {
return ssz.ErrOffset
}
v.TrailingField = append(v.TrailingField, buf[52:108]...)

// Field (7) 'TrailingField'
if cap(v.TrailingField) == 0 {
v.TrailingField = make([]byte, 0, len(buf[56:112]))
}
v.TrailingField = append(v.TrailingField, buf[56:112]...)

// Field (1) 'FieldListUint64'
{
@@ -632,7 +748,7 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {

// Field (5) 'BitlistField'
{
buf = tail[o5:]
buf = tail[o5:o6]
if err = ssz.ValidateBitlist(buf, 2048); err != nil {
return err
}
@@ -641,12 +757,35 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
}
v.BitlistField = append(v.BitlistField, buf...)
}

// Field (6) 'NestedListField'
{
buf = tail[o6:]
num, err := ssz.DecodeDynamicLength(buf, 100)
if err != nil {
return err
}
v.NestedListField = make([][]byte, num)
err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) {
if len(buf) > 50 {
return ssz.ErrBytesLength
}
if cap(v.NestedListField[indx]) == 0 {
v.NestedListField[indx] = make([]byte, 0, len(buf))
}
v.NestedListField[indx] = append(v.NestedListField[indx], buf...)
return nil
})
if err != nil {
return err
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the VariableTestContainer object
func (v *VariableTestContainer) SizeSSZ() (size int) {
size = 108
size = 112

// Field (1) 'FieldListUint64'
size += len(v.FieldListUint64) * 8
@@ -666,6 +805,12 @@ func (v *VariableTestContainer) SizeSSZ() (size int) {
// Field (5) 'BitlistField'
size += len(v.BitlistField)

// Field (6) 'NestedListField'
for ii := 0; ii < len(v.NestedListField); ii++ {
size += 4
size += len(v.NestedListField[ii])
}

return
}

@@ -748,7 +893,30 @@ func (v *VariableTestContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBitlist(v.BitlistField, 2048)

// Field (6) 'TrailingField'
// Field (6) 'NestedListField'
{
subIndx := hh.Index()
num := uint64(len(v.NestedListField))
if num > 100 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range v.NestedListField {
{
elemIndx := hh.Index()
byteLen := uint64(len(elem))
if byteLen > 50 {
err = ssz.ErrIncorrectListSize
return
}
hh.AppendBytes32(elem)
hh.MerkleizeWithMixin(elemIndx, byteLen, (50+31)/32)
}
}
hh.MerkleizeWithMixin(subIndx, num, 100)
}

// Field (7) 'TrailingField'
if size := len(v.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return

@@ -90,6 +90,7 @@ func (r *runner) run(ctx context.Context) {
select {
case <-ctx.Done():
log.Info("Context canceled, stopping validator")
//nolint:govet
return // Exit if context is canceled.
case slot := <-v.NextSlot():
if !r.healthMonitor.IsHealthy() {
@@ -98,7 +99,7 @@ func (r *runner) run(ctx context.Context) {
}

deadline := v.SlotDeadline(slot)
slotCtx, cancel := context.WithDeadline(ctx, deadline)
slotCtx, cancel := context.WithDeadline(ctx, deadline) //nolint:govet

var span trace.Span
slotCtx, span = prysmTrace.StartSpan(slotCtx, "validator.processSlot")
@@ -131,7 +132,7 @@ func (r *runner) run(ctx context.Context) {

// Start fetching domain data for the next epoch.
if slots.IsEpochEnd(slot) {
domainCtx, _ := context.WithDeadline(ctx, deadline)
domainCtx, _ := context.WithDeadline(ctx, deadline) //nolint:govet
go v.UpdateDomainDataCaches(domainCtx, slot+1)
}

@@ -145,7 +146,7 @@ func (r *runner) run(ctx context.Context) {
continue
}
// performRoles calls span.End()
rolesCtx, _ := context.WithDeadline(ctx, deadline)
rolesCtx, _ := context.WithDeadline(ctx, deadline) //nolint:govet
performRoles(rolesCtx, allRoles, v, slot, &wg, span)
case e := <-v.EventsChan():
v.ProcessEvent(ctx, e)

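The //nolint:govet directives added in this final hunk suppress go vet's lostcancel check, which flags context.WithDeadline (and WithCancel/WithTimeout) calls whose CancelFunc is discarded. A minimal sketch of the pattern being silenced (illustrative only, not code from the repository):

package example

import (
    "context"
    "time"
)

// deriveCtx shows the shape lostcancel complains about: the CancelFunc is
// discarded, so the derived context is only released when the deadline fires
// or the parent context is canceled.
func deriveCtx(ctx context.Context, deadline time.Time) context.Context {
    c, _ := context.WithDeadline(ctx, deadline) //nolint:govet
    return c
}

// deriveCtxWithCancel is the form the linter prefers when the caller can
// defer cancel() itself.
func deriveCtxWithCancel(ctx context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
    return context.WithDeadline(ctx, deadline)
}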