Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: gloas/proc...develop (12 commits)
| SHA1 |
|---|
| 21366e11ca |
| 7ed4d496dd |
| 158c09ca8c |
| 17245f4fac |
| 53b0a574ab |
| c96d188468 |
| 0fcb922702 |
| 3646a77bfb |
| 1541558261 |
| 1a6252ade4 |
| 27c009e7ff |
| ffad861e2c |
CHANGELOG.md (19 lines changed)
@@ -4,6 +4,25 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07

Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).

### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices (see the sketch below). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)
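For illustration only, and not the code added in the PR: an SSZ `uint64` wrapper of this kind is typically a named `uint64` type whose SSZ form is the 8-byte little-endian encoding. A minimal self-contained sketch (package name and method shapes are assumptions; Prysm's real `primitives.BuilderIndex` uses generated fast-SSZ code):

```go
// Hypothetical sketch of an SSZ uint64 wrapper for builder registry indices.
package primitives

import (
	"encoding/binary"
	"errors"
)

// BuilderIndex identifies a builder in the builder registry.
type BuilderIndex uint64

// MarshalSSZ encodes the index as an 8-byte little-endian value,
// which is the SSZ representation of a uint64.
func (b BuilderIndex) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(b))
	return buf, nil
}

// UnmarshalSSZ decodes an 8-byte little-endian value into the index.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
	if len(buf) != 8 {
		return errors.New("expected 8 bytes for uint64")
	}
	*b = BuilderIndex(binary.LittleEndian.Uint64(buf))
	return nil
}
```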
### Changed

- The `/eth/v2/beacon/pool/attestations` and `/eth/v1/beacon/pool/sync_committees` endpoints now return a 503 error if the node is still syncing, and the REST API now broadcasts immediately, mirroring the gRPC behavior. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)
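The changelog entry does not show how the deduplication is keyed. One plausible approach, sketched below purely for illustration (types and helper names are assumptions, not Prysm's API), is to key pending aggregates on the attestation data root plus the aggregation bits, so entries differing only by aggregator index collapse to one:

```go
// Illustrative only: deduplicate pending aggregates that differ solely by
// aggregator index.
package main

import (
	"crypto/sha256"
	"fmt"
)

type pendingAggregate struct {
	AggregatorIndex uint64
	DataRoot        [32]byte // hash tree root of the attestation data
	AggregationBits []byte
}

// dedupKey ignores AggregatorIndex so equivalent aggregates collapse.
func dedupKey(a pendingAggregate) [32]byte {
	return sha256.Sum256(append(a.DataRoot[:], a.AggregationBits...))
}

func dedupe(queue []pendingAggregate) []pendingAggregate {
	seen := make(map[[32]byte]bool)
	out := make([]pendingAggregate, 0, len(queue))
	for _, a := range queue {
		k := dedupKey(a)
		if seen[k] {
			continue // same attestation data and bits; only the aggregator differs
		}
		seen[k] = true
		out = append(out, a)
	}
	return out
}

func main() {
	root := [32]byte{1}
	q := []pendingAggregate{
		{AggregatorIndex: 10, DataRoot: root, AggregationBits: []byte{0b1}},
		{AggregatorIndex: 11, DataRoot: root, AggregationBits: []byte{0b1}},
	}
	fmt.Println(len(dedupe(q))) // 1
}
```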
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)

## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18

Release highlights:
WORKSPACE (10 lines changed)
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0"
consensus_spec_version = "v1.7.0-alpha.0"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
        "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
        "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
        "general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
        "minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
        "mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
    },
    version = consensus_spec_version,
)

@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
    integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -17,6 +17,7 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
|
||||
@@ -130,12 +131,10 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
// Hacky sleep, should use a better way to be able to resolve the race
|
||||
// between event being sent out and processed.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -222,10 +221,10 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
|
||||
})
|
||||
wg.Wait()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
// Verify fork choice has processed the block. (Genesis block and the new block)
|
||||
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
@@ -265,10 +264,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -512,8 +511,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) == 1
|
||||
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
@@ -552,8 +552,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) == 1
|
||||
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
@@ -596,13 +597,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {
|
||||
|
||||
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
|
||||
|
||||
// wait for the goroutine to finish processing
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Check that the light client bootstrap is saved
|
||||
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
// Wait for the light client bootstrap to be saved (runs in goroutine)
|
||||
var b interfaces.LightClientBootstrap
|
||||
require.Eventually(t, func() bool {
|
||||
var err error
|
||||
b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
return err == nil && b != nil
|
||||
}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")
|
||||
|
||||
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -212,7 +213,12 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
}
|
||||
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
if features.Get().LowValcountSweep {
|
||||
bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
nextValidatorIndex += primitives.ValidatorIndex(bound)
|
||||
} else {
|
||||
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
}
|
||||
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
|
||||
} else {
|
||||
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
|
||||
|
||||
@@ -12,7 +12,6 @@ go_library(
|
||||
"log.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
@@ -62,7 +61,6 @@ go_test(
|
||||
"error_test.go",
|
||||
"export_test.go",
|
||||
"registry_updates_test.go",
|
||||
"transition_no_verify_sig_test.go",
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
|
||||
@@ -6,6 +6,11 @@ type execReqErr struct {
|
||||
error
|
||||
}
|
||||
|
||||
// NewExecReqError creates a new execReqErr.
|
||||
func NewExecReqError(msg string) error {
|
||||
return execReqErr{errors.New(msg)}
|
||||
}
|
||||
|
||||
// IsExecutionRequestError returns true if the error has `execReqErr`.
|
||||
func IsExecutionRequestError(e error) bool {
|
||||
if e == nil {
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessOperationsWithNilRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "Nil deposit request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
|
||||
},
|
||||
errMsg: "nil deposit request",
|
||||
},
|
||||
{
|
||||
name: "Nil withdrawal request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
|
||||
},
|
||||
errMsg: "nil withdrawal request",
|
||||
},
|
||||
{
|
||||
name: "Nil consolidation request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
|
||||
},
|
||||
errMsg: "nil consolidation request",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
|
||||
_, err = electra.ProcessOperations(t.Context(), st, b.Block())
|
||||
require.ErrorContains(t, tc.errMsg, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"builder.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,74 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// IsBuilderIndex returns true when the BuilderIndex flag is set on a validator index.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def is_builder_index(validator_index: ValidatorIndex) -> bool:
|
||||
// return (validator_index & BUILDER_INDEX_FLAG) != 0
|
||||
func IsBuilderIndex(validatorIndex primitives.ValidatorIndex) bool {
|
||||
return uint64(validatorIndex)&params.BeaconConfig().BuilderIndexFlag != 0
|
||||
}
|
||||
|
||||
// ConvertValidatorIndexToBuilderIndex strips the builder flag from a validator index.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def convert_validator_index_to_builder_index(validator_index: ValidatorIndex) -> BuilderIndex:
|
||||
// return BuilderIndex(validator_index & ~BUILDER_INDEX_FLAG)
|
||||
func ConvertValidatorIndexToBuilderIndex(validatorIndex primitives.ValidatorIndex) primitives.BuilderIndex {
|
||||
return primitives.BuilderIndex(uint64(validatorIndex) & ^params.BeaconConfig().BuilderIndexFlag)
|
||||
}
|
||||
|
||||
// ConvertBuilderIndexToValidatorIndex sets the builder flag on a builder index.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def convert_builder_index_to_validator_index(builder_index: BuilderIndex) -> ValidatorIndex:
|
||||
// return ValidatorIndex(builder_index | BUILDER_INDEX_FLAG)
|
||||
func ConvertBuilderIndexToValidatorIndex(builderIndex primitives.BuilderIndex) primitives.ValidatorIndex {
|
||||
return primitives.ValidatorIndex(uint64(builderIndex) | params.BeaconConfig().BuilderIndexFlag)
|
||||
}
|
||||
|
||||
// IsBuilderWithdrawalCredential returns true when the builder withdrawal prefix is set.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def is_builder_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
|
||||
// return withdrawal_credentials[:1] == BUILDER_WITHDRAWAL_PREFIX
|
||||
func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
|
||||
if len(withdrawalCredentials) == 0 {
|
||||
return false
|
||||
}
|
||||
return withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
|
||||
}
|
||||
|
||||
// IsActiveBuilder checks if the builder is active for the given state.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
|
||||
// """
|
||||
// Check if the builder at ``builder_index`` is active for the given ``state``.
|
||||
// """
|
||||
// builder = state.builders[builder_index]
|
||||
// return (
|
||||
// # Placement in builder list is finalized
|
||||
// builder.deposit_epoch < state.finalized_checkpoint.epoch
|
||||
// # Has not initiated exit
|
||||
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
func IsActiveBuilder(st state.ReadOnlyBeaconState, builderIndex primitives.BuilderIndex) bool {
|
||||
builders, err := st.Builders()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if builderIndex >= primitives.BuilderIndex(len(builders)) {
|
||||
return false
|
||||
}
|
||||
|
||||
builder := builders[int(builderIndex)]
|
||||
if builder == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
finalizedEpoch := st.FinalizedCheckpointEpoch()
|
||||
return builder.DepositEpoch < finalizedEpoch &&
|
||||
builder.WithdrawableEpoch == params.BeaconConfig().FarFutureEpoch
|
||||
}
|
||||
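The removed helpers above are simple bit-flag conversions. As a quick, self-contained illustration (not code from the repository; the flag value below is an assumption, whereas the real value comes from params.BeaconConfig().BuilderIndexFlag), converting a builder index to a flagged validator index and back should round-trip:

```go
// Sketch of the builder-index flag round trip used by the removed helpers above.
package main

import "fmt"

const builderIndexFlag uint64 = 1 << 40 // assumed flag bit, for illustration only

func isBuilderIndex(validatorIndex uint64) bool {
	return validatorIndex&builderIndexFlag != 0
}

func toBuilderIndex(validatorIndex uint64) uint64 {
	return validatorIndex &^ builderIndexFlag // clear the flag bit
}

func toValidatorIndex(builderIndex uint64) uint64 {
	return builderIndex | builderIndexFlag // set the flag bit
}

func main() {
	b := uint64(42)
	v := toValidatorIndex(b)
	fmt.Println(isBuilderIndex(v))      // true
	fmt.Println(toBuilderIndex(v) == b) // true
}
```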
@@ -1,201 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// getBuilderWithdrawals returns builder withdrawals derived from pending builder withdrawals.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def get_builder_withdrawals(state, withdrawal_index, prior_withdrawals):
|
||||
//
|
||||
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
|
||||
//
|
||||
// processed_count = 0
|
||||
// withdrawals = []
|
||||
// for withdrawal in state.builder_pending_withdrawals:
|
||||
// all_withdrawals = prior_withdrawals + withdrawals
|
||||
// has_reached_limit = len(all_withdrawals) == withdrawals_limit
|
||||
// if has_reached_limit:
|
||||
// break
|
||||
//
|
||||
// builder_index = withdrawal.builder_index
|
||||
// withdrawals.append(
|
||||
// Withdrawal(
|
||||
// index=withdrawal_index,
|
||||
// validator_index=convert_builder_index_to_validator_index(builder_index),
|
||||
// address=withdrawal.fee_recipient,
|
||||
// amount=withdrawal.amount,
|
||||
// )
|
||||
// )
|
||||
// withdrawal_index += WithdrawalIndex(1)
|
||||
// processed_count += 1
|
||||
//
|
||||
// return withdrawals, withdrawal_index, processed_count
|
||||
func getBuilderWithdrawals(
|
||||
st state.ReadOnlyBeaconState,
|
||||
withdrawalIndex uint64,
|
||||
) ([]*enginev1.Withdrawal, uint64, uint64, error) {
|
||||
pendingWithdrawals, err := st.BuilderPendingWithdrawals()
|
||||
if err != nil {
|
||||
return nil, withdrawalIndex, 0, err
|
||||
}
|
||||
|
||||
withdrawalsLimit := params.BeaconConfig().MaxWithdrawalsPerPayload
|
||||
withdrawals := make([]*enginev1.Withdrawal, 0, len(pendingWithdrawals))
|
||||
var processedCount uint64
|
||||
for _, withdrawal := range pendingWithdrawals {
|
||||
if uint64(len(withdrawals)) == withdrawalsLimit {
|
||||
break
|
||||
}
|
||||
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: ConvertBuilderIndexToValidatorIndex(primitives.BuilderIndex(withdrawal.BuilderIndex)),
|
||||
Address: bytesutil.SafeCopyBytes(withdrawal.FeeRecipient),
|
||||
Amount: uint64(withdrawal.Amount),
|
||||
})
|
||||
withdrawalIndex++
|
||||
processedCount++
|
||||
}
|
||||
|
||||
return withdrawals, withdrawalIndex, processedCount, nil
|
||||
}
|
||||
|
||||
// getBuildersSweepWithdrawals returns builder withdrawals selected by a sweep over the builders registry.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def get_builders_sweep_withdrawals(state, withdrawal_index, prior_withdrawals):
|
||||
//
|
||||
// epoch = get_current_epoch(state)
|
||||
// builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
|
||||
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
|
||||
//
|
||||
// processed_count = 0
|
||||
// withdrawals = []
|
||||
// builder_index = state.next_withdrawal_builder_index
|
||||
// for _ in range(builders_limit):
|
||||
// all_withdrawals = prior_withdrawals + withdrawals
|
||||
// has_reached_limit = len(all_withdrawals) == withdrawals_limit
|
||||
// if has_reached_limit:
|
||||
// break
|
||||
//
|
||||
// builder = state.builders[builder_index]
|
||||
// if builder.withdrawable_epoch <= epoch and builder.balance > 0:
|
||||
// withdrawals.append(
|
||||
// Withdrawal(
|
||||
// index=withdrawal_index,
|
||||
// validator_index=convert_builder_index_to_validator_index(builder_index),
|
||||
// address=builder.execution_address,
|
||||
// amount=builder.balance,
|
||||
// )
|
||||
// )
|
||||
// withdrawal_index += WithdrawalIndex(1)
|
||||
//
|
||||
// builder_index = BuilderIndex((builder_index + 1) % len(state.builders))
|
||||
// processed_count += 1
|
||||
//
|
||||
// return withdrawals, withdrawal_index, processed_count
|
||||
func getBuildersSweepWithdrawals(
|
||||
st state.ReadOnlyBeaconState,
|
||||
withdrawalIndex uint64,
|
||||
priorWithdrawals []*enginev1.Withdrawal,
|
||||
) ([]*enginev1.Withdrawal, uint64, uint64, error) {
|
||||
builders, err := st.Builders()
|
||||
if err != nil {
|
||||
return nil, withdrawalIndex, 0, err
|
||||
}
|
||||
if len(builders) == 0 {
|
||||
return nil, withdrawalIndex, 0, nil
|
||||
}
|
||||
|
||||
builderIndex, err := st.NextWithdrawalBuilderIndex()
|
||||
if err != nil {
|
||||
return nil, withdrawalIndex, 0, err
|
||||
}
|
||||
if uint64(builderIndex) >= uint64(len(builders)) {
|
||||
return nil, withdrawalIndex, 0, fmt.Errorf("next withdrawal builder index %d out of range", builderIndex)
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
buildersLimit := len(builders)
|
||||
if maxBuilders := int(cfg.MaxBuildersPerWithdrawalsSweep); buildersLimit > maxBuilders {
|
||||
buildersLimit = maxBuilders
|
||||
}
|
||||
withdrawalsLimit := cfg.MaxWithdrawalsPerPayload
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
withdrawals := make([]*enginev1.Withdrawal, 0, buildersLimit)
|
||||
var processedCount uint64
|
||||
for i := 0; i < buildersLimit; i++ {
|
||||
if uint64(len(priorWithdrawals)+len(withdrawals)) == withdrawalsLimit {
|
||||
break
|
||||
}
|
||||
|
||||
builder := builders[builderIndex]
|
||||
if builder != nil && builder.WithdrawableEpoch <= epoch && builder.Balance > 0 {
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: ConvertBuilderIndexToValidatorIndex(builderIndex),
|
||||
Address: bytesutil.SafeCopyBytes(builder.ExecutionAddress),
|
||||
Amount: uint64(builder.Balance),
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
|
||||
builderIndex = primitives.BuilderIndex((uint64(builderIndex) + 1) % uint64(len(builders)))
|
||||
processedCount++
|
||||
}
|
||||
|
||||
return withdrawals, withdrawalIndex, processedCount, nil
|
||||
}
|
||||
|
||||
// updatePayloadExpectedWithdrawals stores the expected withdrawals for the next payload.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def update_payload_expected_withdrawals(state, withdrawals):
|
||||
//
|
||||
// state.payload_expected_withdrawals = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD](withdrawals)
|
||||
func updatePayloadExpectedWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal) error {
|
||||
return st.SetPayloadExpectedWithdrawals(withdrawals)
|
||||
}
|
||||
|
||||
// updateBuilderPendingWithdrawals removes processed builder pending withdrawals.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def update_builder_pending_withdrawals(state, processed_builder_withdrawals_count):
|
||||
//
|
||||
// state.builder_pending_withdrawals = state.builder_pending_withdrawals[processed_builder_withdrawals_count:]
|
||||
func updateBuilderPendingWithdrawals(st state.BeaconState, processedBuilderWithdrawalsCount uint64) error {
|
||||
if processedBuilderWithdrawalsCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return st.DequeueBuilderPendingWithdrawals(processedBuilderWithdrawalsCount)
|
||||
}
|
||||
|
||||
// updateNextWithdrawalBuilderIndex advances the sweep position for builders.
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// def update_next_withdrawal_builder_index(state, processed_builders_sweep_count):
|
||||
//
|
||||
// if len(state.builders) > 0:
|
||||
// next_index = state.next_withdrawal_builder_index + processed_builders_sweep_count
|
||||
// next_builder_index = BuilderIndex(next_index % len(state.builders))
|
||||
// state.next_withdrawal_builder_index = next_builder_index
|
||||
func updateNextWithdrawalBuilderIndex(st state.BeaconState, processedBuildersSweepCount uint64) error {
|
||||
buildersCount := st.BuildersCount()
|
||||
if buildersCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
nextIndex, err := st.NextWithdrawalBuilderIndex()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nextBuilderIndex := primitives.BuilderIndex((uint64(nextIndex) + processedBuildersSweepCount) % uint64(buildersCount))
|
||||
return st.SetNextWithdrawalBuilderIndex(nextBuilderIndex)
|
||||
}
|
||||
@@ -3,6 +3,8 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"electra.go",
|
||||
"errors.go",
|
||||
"log.go",
|
||||
"skip_slot_cache.go",
|
||||
"state.go",
|
||||
@@ -62,6 +64,8 @@ go_test(
|
||||
"altair_transition_no_verify_sig_test.go",
|
||||
"bellatrix_transition_no_verify_sig_test.go",
|
||||
"benchmarks_test.go",
|
||||
"electra_test.go",
|
||||
"exports_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"state_fuzz_test.go",
|
||||
"state_test.go",
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package electra
|
||||
package transition
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
@@ -47,7 +48,7 @@ var (
|
||||
// # [New in Electra:EIP7251]
|
||||
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
|
||||
|
||||
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
var err error
|
||||
|
||||
// 6110 validations are in VerifyOperationLengths
|
||||
@@ -63,59 +64,60 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
|
||||
st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair proposer slashing")
|
||||
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
|
||||
st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attester slashing")
|
||||
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
|
||||
st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attestation")
|
||||
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
|
||||
}
|
||||
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
|
||||
return nil, errors.Wrap(err, "could not process altair deposit")
|
||||
if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
|
||||
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
|
||||
}
|
||||
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
|
||||
st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process voluntary exits")
|
||||
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
|
||||
}
|
||||
st, err = ProcessBLSToExecutionChanges(st, block)
|
||||
st, err = blocks.ProcessBLSToExecutionChanges(st, block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process bls-to-execution changes")
|
||||
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
|
||||
}
|
||||
// new in electra
|
||||
requests, err := bb.ExecutionRequests()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution requests")
|
||||
return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
|
||||
}
|
||||
for _, d := range requests.Deposits {
|
||||
if d == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
return nil, electra.NewExecReqError("nil deposit request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
if err != nil {
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
|
||||
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
|
||||
}
|
||||
|
||||
for _, w := range requests.Withdrawals {
|
||||
if w == nil {
|
||||
return nil, errors.New("nil withdrawal request")
|
||||
return nil, electra.NewExecReqError("nil withdrawal request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
if err != nil {
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
|
||||
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
|
||||
}
|
||||
for _, c := range requests.Consolidations {
|
||||
if c == nil {
|
||||
return nil, errors.New("nil consolidation request")
|
||||
return nil, electra.NewExecReqError("nil consolidation request")
|
||||
}
|
||||
}
|
||||
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
|
||||
if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
beacon-chain/core/transition/electra_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessOperationsWithNilRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "Nil deposit request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
|
||||
},
|
||||
errMsg: "nil deposit request",
|
||||
},
|
||||
{
|
||||
name: "Nil withdrawal request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
|
||||
},
|
||||
errMsg: "nil withdrawal request",
|
||||
},
|
||||
{
|
||||
name: "Nil consolidation request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
|
||||
},
|
||||
errMsg: "nil consolidation request",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
|
||||
_, err = transition.ElectraOperations(t.Context(), st, b.Block())
|
||||
require.ErrorContains(t, tc.errMsg, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestElectraOperations_ProcessingErrors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
|
||||
errCheck func(t *testing.T, err error)
|
||||
}{
|
||||
{
|
||||
name: "ErrProcessProposerSlashingsFailed",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
// Create invalid proposer slashing with out-of-bounds proposer index
|
||||
blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
|
||||
{
|
||||
Header_1: &ethpb.SignedBeaconBlockHeader{
|
||||
Header: &ethpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 999999, // Invalid index (out of bounds)
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Header_2: &ethpb.SignedBeaconBlockHeader{
|
||||
Header: &ethpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 999999,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
errCheck: func(t *testing.T, err error) {
|
||||
require.ErrorContains(t, "process proposer slashings failed", err)
|
||||
require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ErrProcessAttestationsFailed",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
// Create attestation with invalid committee index
|
||||
blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
|
||||
{
|
||||
AggregationBits: []byte{0b00000001},
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 999999, // Invalid committee index
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
CommitteeBits: []byte{0b00000001},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
},
|
||||
errCheck: func(t *testing.T, err error) {
|
||||
require.ErrorContains(t, "process attestations failed", err)
|
||||
require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ErrProcessDepositsFailed",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
// Create deposit with invalid proof length
|
||||
blk.Block.Body.Deposits = []*ethpb.Deposit{
|
||||
{
|
||||
Proof: [][]byte{}, // Invalid: empty proof
|
||||
Data: &ethpb.Deposit_Data{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 32000000000, // 32 ETH in Gwei
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
errCheck: func(t *testing.T, err error) {
|
||||
require.ErrorContains(t, "process deposits failed", err)
|
||||
require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ErrProcessVoluntaryExitsFailed",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
// Create voluntary exit with invalid validator index
|
||||
blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: &ethpb.VoluntaryExit{
|
||||
Epoch: 0,
|
||||
ValidatorIndex: 999999, // Invalid index (out of bounds)
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
},
|
||||
errCheck: func(t *testing.T, err error) {
|
||||
require.ErrorContains(t, "process voluntary exits failed", err)
|
||||
require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ErrProcessBLSChangesFailed",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
// Create BLS to execution change with invalid validator index
|
||||
blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
|
||||
{
|
||||
Message: &ethpb.BLSToExecutionChange{
|
||||
ValidatorIndex: 999999, // Invalid index (out of bounds)
|
||||
FromBlsPubkey: make([]byte, 48),
|
||||
ToExecutionAddress: make([]byte, 20),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
},
|
||||
errCheck: func(t *testing.T, err error) {
|
||||
require.ErrorContains(t, "process BLS to execution changes failed", err)
|
||||
require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(1)))
|
||||
|
||||
_, err = transition.ElectraOperations(ctx, st, b.Block())
|
||||
require.NotNil(t, err, "Expected an error but got nil")
|
||||
tc.errCheck(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/transition/errors.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
package transition
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrAttestationsSignatureInvalid = errors.New("attestations signature invalid")
|
||||
ErrRandaoSignatureInvalid = errors.New("randao signature invalid")
|
||||
ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
|
||||
ErrProcessWithdrawalsFailed = errors.New("process withdrawals failed")
|
||||
ErrProcessRandaoFailed = errors.New("process randao failed")
|
||||
ErrProcessEth1DataFailed = errors.New("process eth1 data failed")
|
||||
ErrProcessProposerSlashingsFailed = errors.New("process proposer slashings failed")
|
||||
ErrProcessAttesterSlashingsFailed = errors.New("process attester slashings failed")
|
||||
ErrProcessAttestationsFailed = errors.New("process attestations failed")
|
||||
ErrProcessDepositsFailed = errors.New("process deposits failed")
|
||||
ErrProcessVoluntaryExitsFailed = errors.New("process voluntary exits failed")
|
||||
ErrProcessBLSChangesFailed = errors.New("process BLS to execution changes failed")
|
||||
ErrProcessSyncAggregateFailed = errors.New("process sync aggregate failed")
|
||||
)
|
||||
beacon-chain/core/transition/exports_test.go (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
package transition
|
||||
|
||||
var ElectraOperations = electraOperations
|
||||
@@ -7,12 +7,11 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
|
||||
b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
@@ -70,10 +69,11 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
}
|
||||
|
||||
// Execute per block transition.
|
||||
set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
set := sigSlice.Batch()
|
||||
|
||||
// State root validation.
|
||||
postStateRoot, err := st.HashTreeRoot(ctx)
|
||||
@@ -113,7 +113,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
// assert block.state_root == hash_tree_root(state)
|
||||
func CalculateStateRoot(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
rollback state.BeaconState,
|
||||
signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
|
||||
@@ -122,7 +122,7 @@ func CalculateStateRoot(
|
||||
tracing.AnnotateError(span, ctx.Err())
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
if state == nil || state.IsNil() {
|
||||
if rollback == nil || rollback.IsNil() {
|
||||
return [32]byte{}, errors.New("nil state")
|
||||
}
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
@@ -130,7 +130,7 @@ func CalculateStateRoot(
|
||||
}
|
||||
|
||||
// Copy state to avoid mutating the state reference.
|
||||
state = state.Copy()
|
||||
state := rollback.Copy()
|
||||
|
||||
// Execute per slots transition.
|
||||
var err error
|
||||
@@ -141,12 +141,101 @@ func CalculateStateRoot(
|
||||
}
|
||||
|
||||
// Execute per block transition.
|
||||
state, err = ProcessBlockForStateRoot(ctx, state, signed)
|
||||
if features.Get().EnableProposerPreprocessing {
|
||||
state, err = processBlockForProposing(ctx, state, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
|
||||
}
|
||||
} else {
|
||||
state, err = ProcessBlockForStateRoot(ctx, state, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
}
|
||||
return state.HashTreeRoot(ctx)
|
||||
}
|
||||
|
||||
// processBlockForProposing processes the block and verifies the signatures within it, except for the block proposer signature, which cannot be verified because the block is not yet signed.
|
||||
func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
|
||||
var err error
|
||||
var set BlockSignatureBatches
|
||||
set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block")
|
||||
return nil, err
|
||||
}
|
||||
// We first try to verify all signatures batched optimistically. We ignore the block proposer signature.
|
||||
sigSet := set.Batch()
|
||||
valid, err := sigSet.Verify()
|
||||
if err != nil || valid {
|
||||
return st, err
|
||||
}
|
||||
// Some signature failed to verify.
|
||||
// Verify Attestations signatures
|
||||
attSigs := set.AttestationSignatures
|
||||
if attSigs == nil {
|
||||
return nil, ErrAttestationsSignatureInvalid
|
||||
}
|
||||
valid, err = attSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrAttestationsSignatureInvalid
|
||||
}
|
||||
|
||||
return state.HashTreeRoot(ctx)
|
||||
// Verify Randao signature
|
||||
randaoSigs := set.RandaoSignatures
|
||||
if randaoSigs == nil {
|
||||
return nil, ErrRandaoSignatureInvalid
|
||||
}
|
||||
valid, err = randaoSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrRandaoSignatureInvalid
|
||||
}
|
||||
|
||||
if signed.Block().Version() < version.Capella {
|
||||
// This should not happen, as we must have failed one of the above signatures.
|
||||
return st, nil
|
||||
}
|
||||
// Verify BLS to execution changes signatures
|
||||
blsChangeSigs := set.BLSChangeSignatures
|
||||
if blsChangeSigs == nil {
|
||||
return nil, ErrBLSToExecutionChangesSignatureInvalid
|
||||
}
|
||||
valid, err = blsChangeSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrBLSToExecutionChangesSignatureInvalid
|
||||
}
|
||||
// We should not reach this point as one of the above signatures must have failed.
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
|
||||
type BlockSignatureBatches struct {
|
||||
RandaoSignatures *bls.SignatureBatch
|
||||
AttestationSignatures *bls.SignatureBatch
|
||||
BLSChangeSignatures *bls.SignatureBatch
|
||||
}
|
||||
|
||||
// Batch returns the batch of signature batches in the BlockSignatureBatches.
|
||||
func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
|
||||
sigs := bls.NewSet()
|
||||
if b.RandaoSignatures != nil {
|
||||
sigs.Join(b.RandaoSignatures)
|
||||
}
|
||||
if b.AttestationSignatures != nil {
|
||||
sigs.Join(b.AttestationSignatures)
|
||||
}
|
||||
if b.BLSChangeSignatures != nil {
|
||||
sigs.Join(b.BLSChangeSignatures)
|
||||
}
|
||||
return sigs
|
||||
}
|
||||
|
||||
// ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation
|
||||
@@ -165,48 +254,48 @@ func ProcessBlockNoVerifyAnySig(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (*bls.SignatureBatch, state.BeaconState, error) {
|
||||
) (BlockSignatureBatches, state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
|
||||
defer span.End()
|
||||
set := BlockSignatureBatches{}
|
||||
if err := blocks.BeaconBlockIsNil(signed); err != nil {
|
||||
return nil, nil, err
|
||||
return set, nil, err
|
||||
}
|
||||
|
||||
if st.Version() != signed.Block().Version() {
|
||||
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
|
||||
return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
|
||||
}
|
||||
|
||||
blk := signed.Block()
|
||||
st, err := ProcessBlockForStateRoot(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return set, nil, err
|
||||
}
|
||||
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
|
||||
return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
|
||||
}
|
||||
set.RandaoSignatures = rSet
|
||||
aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
|
||||
return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
|
||||
}
|
||||
set.AttestationSignatures = aSet
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
cSet, err := b.BLSChangesSignatureBatch(st, changes)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
|
||||
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
|
||||
}
|
||||
set.Join(cSet)
|
||||
set.BLSChangeSignatures = cSet
|
||||
}
|
||||
return set, st, nil
|
||||
}
|
||||
@@ -268,7 +357,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
state, err = electra.ProcessOperations(ctx, state, beaconBlock)
|
||||
state, err = electraOperations(ctx, state, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -326,7 +415,7 @@ func ProcessBlockForStateRoot(
|
||||
if state.Version() >= version.Capella {
|
||||
state, err = b.ProcessWithdrawals(state, executionData)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
|
||||
}
|
||||
}
|
||||
if err = b.ProcessPayload(state, blk.Body()); err != nil {
|
||||
@@ -338,13 +427,13 @@ func ProcessBlockForStateRoot(
|
||||
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not verify and process randao")
|
||||
return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
|
||||
}
|
||||
|
||||
state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process eth1 data")
|
||||
return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
|
||||
}
|
||||
|
||||
state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
|
||||
@@ -363,7 +452,7 @@ func ProcessBlockForStateRoot(
|
||||
}
|
||||
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "process_sync_aggregate failed")
|
||||
return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
|
||||
}
|
||||
|
||||
return state, nil
|
||||
@@ -379,31 +468,35 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
|
||||
exitInfo := &validators.ExitInfo{}
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = v.ExitInformation(st)
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair proposer slashing")
|
||||
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attester slashing")
|
||||
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attestation")
|
||||
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
|
||||
}
|
||||
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair deposit")
|
||||
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
|
||||
}
|
||||
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process voluntary exits")
|
||||
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
|
||||
}
|
||||
return b.ProcessBLSToExecutionChanges(st, beaconBlock)
|
||||
st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// This calls phase 0 block operations.
|
||||
@@ -411,32 +504,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
|
||||
var err error
|
||||
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
|
||||
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
|
||||
var exitInfo *v.ExitInfo
|
||||
var exitInfo *validators.ExitInfo
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = v.ExitInformation(st)
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block proposer slashings")
|
||||
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attester slashings")
|
||||
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
|
||||
}
|
||||
st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attestations")
|
||||
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
|
||||
}
|
||||
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposits")
|
||||
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
|
||||
}
|
||||
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process voluntary exits")
|
||||
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
@@ -132,7 +132,8 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
// Test Signature set verifies.
|
||||
verified, err := set.Verify()
|
||||
sigSet := set.Batch()
|
||||
verified, err := sigSet.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Could not verify signature set.")
|
||||
}
|
||||
@@ -145,7 +146,8 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
sigSet := set.Batch()
|
||||
verified, err := sigSet.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, "Could not verify signature set")
|
||||
}
|
||||
@@ -154,8 +156,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
set := signatures.Batch()
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
|
||||
@@ -75,7 +75,6 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
timeForGoroutinesToFinish := 20 * time.Microsecond
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
@@ -87,8 +86,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -102,7 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -117,8 +117,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
@@ -132,7 +133,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
@@ -146,8 +147,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
@@ -161,7 +163,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -175,7 +177,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
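These assertions replace fixed sleeps with polling: require.Eventually retries the condition every tick until the deadline, so the test waits only as long as the broadcast goroutine actually needs. A minimal sketch of the pattern using the stretchr/testify require package, with an atomic flag standing in for p2p.BroadcastCalled:

```go
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestEventuallyInsteadOfSleep shows the polling pattern used above: check the
// condition every 10ms for up to a second rather than sleeping a fixed time.
func TestEventuallyInsteadOfSleep(t *testing.T) {
	var broadcastCalled atomic.Bool

	go func() {
		time.Sleep(20 * time.Millisecond) // simulate the async broadcast goroutine
		broadcastCalled.Store(true)
	}()

	require.Eventually(t, func() bool {
		return broadcastCalled.Load()
	}, time.Second, 10*time.Millisecond, "broadcast should have been observed")
}
```

The negative cases above still use a short fixed sleep, since "the broadcast never happened" cannot be confirmed by polling for a positive condition.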
|
||||
|
||||
|
||||
@@ -72,7 +72,10 @@ func TestService_Broadcast(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -186,7 +189,10 @@ func TestService_BroadcastAttestation(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -375,7 +381,15 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
_, err = tpHandle.Subscribe()
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(500 * time.Millisecond) // libp2p fails without this delay...
|
||||
// This test specifically tests discovery-based peer finding, which requires
|
||||
// time for nodes to discover each other. Using a fixed sleep here is intentional
|
||||
// as we're testing the discovery timing behavior.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Verify mesh establishment after discovery
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0 && len(p2.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
nodePeers := p.pubsub.ListPeers(topic)
|
||||
nodePeers2 := p2.pubsub.ListPeers(topic)
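This discovery test keeps a deliberate fixed sleep, because peer discovery needs wall-clock time, and only then polls both nodes' mesh membership. A small helper sketch of that two-sided wait; listPeers1 and listPeers2 are hypothetical stand-ins for p.pubsub.ListPeers(topic) and p2.pubsub.ListPeers(topic):

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// waitForMesh polls two peer-count functions until both report at least one
// peer on the topic, mirroring the two-sided require.Eventually above.
func waitForMesh(t *testing.T, listPeers1, listPeers2 func() int) {
	t.Helper()
	require.Eventually(t, func() bool {
		return listPeers1() > 0 && listPeers2() > 0
	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
}
```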
|
||||
@@ -444,7 +458,10 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -521,7 +538,10 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -584,7 +604,10 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -660,7 +683,10 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -771,8 +797,10 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// libp2p fails without this delay
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(service.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
|
||||
|
||||
@@ -482,12 +482,12 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
s.Start()
|
||||
<-exitRoutine
|
||||
}()
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
time.Sleep(50 * time.Millisecond) // Wait for service initialization
|
||||
var vr [32]byte
|
||||
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
time.Sleep(4 * time.Second)
|
||||
ps := s.host.Network().Peers()
|
||||
assert.Equal(t, 5, len(ps), "Not all peers added to peerstore")
|
||||
require.Eventually(t, func() bool {
|
||||
return len(s.host.Network().Peers()) == 5
|
||||
}, 10*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
|
||||
require.NoError(t, s.Stop())
|
||||
exitRoutine <- true
|
||||
}
|
||||
|
||||
@@ -80,8 +80,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
}()
|
||||
var vr [32]byte
|
||||
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
time.Sleep(time.Second * 2)
|
||||
assert.Equal(t, true, s.started, "Expected service to be started")
|
||||
require.Eventually(t, func() bool {
|
||||
return s.started
|
||||
}, 5*time.Second, 100*time.Millisecond, "Expected service to be started")
|
||||
s.Start()
|
||||
require.LogsContain(t, hook, "Attempted to start p2p service when it was already started")
|
||||
require.NoError(t, s.Stop())
|
||||
@@ -260,17 +261,9 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
err = cs.SetClock(startup.NewClock(genesisTime, gvr))
|
||||
require.NoError(t, err, "Could not set clock in service")
|
||||
|
||||
actualPeerCount := len(s.host.Network().Peers())
|
||||
for range 40 {
|
||||
if actualPeerCount == peerCount {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
actualPeerCount = len(s.host.Network().Peers())
|
||||
}
|
||||
|
||||
assert.Equal(t, peerCount, actualPeerCount, "Not all peers added to peerstore")
|
||||
require.Eventually(t, func() bool {
|
||||
return len(s.host.Network().Peers()) == peerCount
|
||||
}, 5*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
|
||||
|
||||
err = s.Stop()
|
||||
require.NoError(t, err, "Failed to stop service")
|
||||
|
||||
@@ -657,8 +657,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
require.Eventually(t, func() bool {
|
||||
return s.AttestationsPool.UnaggregatedAttestationCount() == 1
|
||||
}, time.Second, 10*time.Millisecond, "Expected 1 attestation in pool")
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
@@ -677,8 +678,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
require.Eventually(t, func() bool {
|
||||
return s.AttestationsPool.UnaggregatedAttestationCount() == 2
|
||||
}, time.Second, 10*time.Millisecond, "Expected 2 attestations in pool")
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
@@ -798,8 +800,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
require.Eventually(t, func() bool {
|
||||
return s.AttestationsPool.UnaggregatedAttestationCount() == 1
|
||||
}, time.Second, 10*time.Millisecond, "Expected 1 attestation in pool")
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
@@ -818,8 +821,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
require.Eventually(t, func() bool {
|
||||
return s.AttestationsPool.UnaggregatedAttestationCount() == 2
|
||||
}, time.Second, 10*time.Millisecond, "Expected 2 attestations in pool")
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
|
||||
@@ -1375,9 +1379,9 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.SubmitBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
time.Sleep(100 * time.Millisecond) // Delay to let the routine start
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages))
|
||||
require.Eventually(t, func() bool {
|
||||
return broadcaster.BroadcastCalled.Load() && len(broadcaster.BroadcastMessages) == numValidators
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should be called with all messages")
|
||||
|
||||
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
|
||||
require.Equal(t, len(poolChanges), len(signedChanges))
|
||||
@@ -1591,10 +1595,10 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
|
||||
|
||||
s.SubmitBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
time.Sleep(10 * time.Millisecond) // Delay to allow the routine to start
|
||||
require.StringContains(t, "One or more messages failed validation", writer.Body.String())
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages)+1)
|
||||
require.Eventually(t, func() bool {
|
||||
return broadcaster.BroadcastCalled.Load() && len(broadcaster.BroadcastMessages)+1 == numValidators
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should be called with expected messages")
|
||||
|
||||
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
|
||||
require.Equal(t, len(poolChanges)+1, len(signedChanges))
|
||||
|
||||
@@ -602,7 +602,7 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
|
||||
|
||||
// computeStateRoot computes the state root after a block has been processed through a state transition and
|
||||
// returns it to the validator client.
|
||||
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
|
||||
beaconState, err := vs.StateGen.StateByRoot(ctx, block.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve beacon state")
|
||||
@@ -613,13 +613,72 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnl
|
||||
block,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not calculate state root at slot %d", beaconState.Slot())
|
||||
return vs.handleStateRootError(ctx, block, err)
|
||||
}
|
||||
|
||||
log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
|
||||
return root[:], nil
|
||||
}
|
||||
|
||||
type computeStateRootAttemptsKeyType string
|
||||
|
||||
const computeStateRootAttemptsKey = computeStateRootAttemptsKeyType("compute-state-root-attempts")
|
||||
const maxComputeStateRootAttempts = 3
|
||||
|
||||
// handleStateRootError retries block construction in some error cases.
|
||||
func (vs *Server) handleStateRootError(ctx context.Context, block interfaces.SignedBeaconBlock, err error) ([]byte, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, status.Errorf(codes.Canceled, "context error: %v", ctx.Err())
|
||||
}
|
||||
switch {
|
||||
case errors.Is(err, transition.ErrAttestationsSignatureInvalid),
|
||||
errors.Is(err, transition.ErrProcessAttestationsFailed):
|
||||
log.WithError(err).Warn("Retrying block construction without attestations")
|
||||
if err := block.SetAttestations([]ethpb.Att{}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set attestations")
|
||||
}
|
||||
case errors.Is(err, transition.ErrProcessBLSChangesFailed), errors.Is(err, transition.ErrBLSToExecutionChangesSignatureInvalid):
|
||||
log.WithError(err).Warn("Retrying block construction without BLS to execution changes")
|
||||
if err := block.SetBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set BLS to execution changes")
|
||||
}
|
||||
case errors.Is(err, transition.ErrProcessProposerSlashingsFailed):
|
||||
log.WithError(err).Warn("Retrying block construction without proposer slashings")
|
||||
block.SetProposerSlashings([]*ethpb.ProposerSlashing{})
|
||||
case errors.Is(err, transition.ErrProcessAttesterSlashingsFailed):
|
||||
log.WithError(err).Warn("Retrying block construction without attester slashings")
|
||||
if err := block.SetAttesterSlashings([]ethpb.AttSlashing{}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set attester slashings")
|
||||
}
|
||||
case errors.Is(err, transition.ErrProcessVoluntaryExitsFailed):
|
||||
log.WithError(err).Warn("Retrying block construction without voluntary exits")
|
||||
block.SetVoluntaryExits([]*ethpb.SignedVoluntaryExit{})
|
||||
case errors.Is(err, transition.ErrProcessSyncAggregateFailed):
|
||||
log.WithError(err).Warn("Retrying block construction without sync aggregate")
|
||||
emptySig := [96]byte{0xC0}
|
||||
emptyAggregate := ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
|
||||
SyncCommitteeSignature: emptySig[:],
|
||||
}
|
||||
if err := block.SetSyncAggregate(emptyAggregate); err != nil {
|
||||
log.WithError(err).Error("Could not set sync aggregate")
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, errors.Wrap(err, "could not compute state root")
|
||||
}
|
||||
// Prevent deep recursion by limiting the number of attempts.
|
||||
if v, ok := ctx.Value(computeStateRootAttemptsKey).(int); !ok {
|
||||
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, int(1))
|
||||
} else if v >= maxComputeStateRootAttempts {
|
||||
return nil, fmt.Errorf("attempted max compute state root attempts %d", maxComputeStateRootAttempts)
|
||||
} else {
|
||||
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, v+1)
|
||||
}
|
||||
// recursive call to compute state root again
|
||||
return vs.computeStateRoot(ctx, block)
|
||||
}
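handleStateRootError strips the offending operations from the block and recomputes the state root by calling computeStateRoot again, carrying the attempt count in the context so the recursion is bounded by maxComputeStateRootAttempts. A minimal sketch of that context-based counter, with a hypothetical work function that always fails:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type attemptsKeyType string

const attemptsKey = attemptsKeyType("attempts")
const maxAttempts = 3

// retryWithContext bounds recursion by storing the attempt count on the
// context, mirroring computeStateRootAttemptsKey above. work is a
// hypothetical operation; here it keeps failing, so the counter runs out.
func retryWithContext(ctx context.Context, work func() error) error {
	if err := work(); err == nil {
		return nil
	}
	attempts, ok := ctx.Value(attemptsKey).(int)
	if !ok {
		ctx = context.WithValue(ctx, attemptsKey, 1)
	} else if attempts >= maxAttempts {
		return fmt.Errorf("attempted max attempts %d", maxAttempts)
	} else {
		ctx = context.WithValue(ctx, attemptsKey, attempts+1)
	}
	return retryWithContext(ctx, work)
}

func main() {
	err := retryWithContext(context.Background(), func() error { return errors.New("boom") })
	fmt.Println(err) // attempted max attempts 3
}
```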
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// SubmitValidatorRegistrations submits validator registrations.
|
||||
|
||||
@@ -1313,6 +1313,59 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestHandleStateRootError_MaxAttemptsReached(t *testing.T) {
|
||||
// Test that handleStateRootError returns an error when max attempts is reached
|
||||
// instead of recursing infinitely.
|
||||
ctx := t.Context()
|
||||
vs := &Server{}
|
||||
|
||||
// Create a minimal block for testing
|
||||
blk := util.NewBeaconBlock()
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Pre-seed the context with max attempts already reached
|
||||
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, maxComputeStateRootAttempts)
|
||||
|
||||
// Call handleStateRootError with a retryable error
|
||||
_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
|
||||
|
||||
// Should return an error about max attempts instead of recursing
|
||||
require.ErrorContains(t, "attempted max compute state root attempts", err)
|
||||
}
|
||||
|
||||
func TestHandleStateRootError_IncrementsAttempts(t *testing.T) {
|
||||
// Test that handleStateRootError properly increments the attempts counter
|
||||
// and eventually fails after max attempts.
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
beaconState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100)
|
||||
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
vs := &Server{
|
||||
StateGen: stateGen,
|
||||
}
|
||||
|
||||
// Create a block that will trigger retries
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
blk.Block.Slot = 1
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add a state for the parent root so StateByRoot succeeds
|
||||
require.NoError(t, stateGen.SaveState(ctx, parentRoot, beaconState))
|
||||
|
||||
// Call handleStateRootError with a retryable error - it will recurse
|
||||
// but eventually hit the max attempts limit since CalculateStateRoot
|
||||
// will keep failing (no valid attestations, randao, etc.)
|
||||
_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
|
||||
|
||||
// Should eventually fail - either with max attempts or another error
|
||||
require.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
|
||||
@@ -5,13 +5,13 @@ go_library(
|
||||
srcs = [
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"interfaces_gloas.go",
|
||||
"prometheus.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/state",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/state/state-native/custom-types:go_default_library",
|
||||
"//beacon-chain/state/state-native/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
customtypes "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/custom-types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -43,6 +44,8 @@ type Prover interface {
|
||||
FinalizedRootProof(ctx context.Context) ([][]byte, error)
|
||||
CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error)
|
||||
NextSyncCommitteeProof(ctx context.Context) ([][]byte, error)
|
||||
|
||||
ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error)
|
||||
}
|
||||
|
||||
// ReadOnlyBeaconState defines a struct which only has read access to beacon state methods.
|
||||
@@ -57,7 +60,6 @@ type ReadOnlyBeaconState interface {
|
||||
ReadOnlyCheckpoint
|
||||
ReadOnlyAttestations
|
||||
ReadOnlyWithdrawals
|
||||
ReadOnlyGloas
|
||||
ReadOnlyParticipation
|
||||
ReadOnlyInactivity
|
||||
ReadOnlySyncCommittee
|
||||
@@ -99,7 +101,6 @@ type WriteOnlyBeaconState interface {
|
||||
WriteOnlyWithdrawals
|
||||
WriteOnlyDeposits
|
||||
WriteOnlyProposerLookahead
|
||||
WriteOnlyGloas
|
||||
SetGenesisTime(val time.Time) error
|
||||
SetGenesisValidatorsRoot(val []byte) error
|
||||
SetSlot(val primitives.Slot) error
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Builder represents a builder registry entry.
|
||||
type Builder struct {
|
||||
ExecutionAddress []byte
|
||||
Balance uint64
|
||||
DepositEpoch primitives.Epoch
|
||||
WithdrawableEpoch primitives.Epoch
|
||||
}
|
||||
|
||||
// ReadOnlyGloas defines read access to Gloas-specific state fields.
|
||||
type ReadOnlyGloas interface {
|
||||
IsParentBlockFull() bool
|
||||
BuildersCount() int
|
||||
Builders() ([]*Builder, error)
|
||||
NextWithdrawalBuilderIndex() (primitives.BuilderIndex, error)
|
||||
BuilderPendingWithdrawals() ([]*ethpb.BuilderPendingWithdrawal, error)
|
||||
}
|
||||
|
||||
// WriteOnlyGloas defines write access to Gloas-specific state fields.
|
||||
type WriteOnlyGloas interface {
|
||||
SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error
|
||||
DequeueBuilderPendingWithdrawals(num uint64) error
|
||||
SetNextWithdrawalBuilderIndex(idx primitives.BuilderIndex) error
|
||||
}
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
"getters_deposits.go",
|
||||
"getters_eth1.go",
|
||||
"getters_exit.go",
|
||||
"getters_gloas.go",
|
||||
"getters_misc.go",
|
||||
"getters_participation.go",
|
||||
"getters_payload_header.go",
|
||||
@@ -42,7 +41,6 @@ go_library(
|
||||
"setters_payload_header.go",
|
||||
"setters_proposer_lookahead.go",
|
||||
"setters_randao.go",
|
||||
"setters_gloas.go",
|
||||
"setters_state.go",
|
||||
"setters_sync_committee.go",
|
||||
"setters_validator.go",
|
||||
@@ -104,6 +102,7 @@ go_test(
|
||||
"getters_test.go",
|
||||
"getters_validator_test.go",
|
||||
"getters_withdrawal_test.go",
|
||||
"gloas_test.go",
|
||||
"hasher_test.go",
|
||||
"mvslice_fuzz_test.go",
|
||||
"proofs_test.go",
|
||||
@@ -158,6 +157,7 @@ go_test(
|
||||
"@com_github_google_go_cmp//cmp:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
|
||||
],
|
||||
|
||||
@@ -72,11 +72,13 @@ type BeaconState struct {
|
||||
|
||||
// Gloas fields
|
||||
latestExecutionPayloadBid *ethpb.ExecutionPayloadBid
|
||||
builders []*ethpb.Builder
|
||||
nextWithdrawalBuilderIndex primitives.BuilderIndex
|
||||
executionPayloadAvailability []byte
|
||||
builderPendingPayments []*ethpb.BuilderPendingPayment
|
||||
builderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal
|
||||
latestBlockHash []byte
|
||||
latestWithdrawalsRoot []byte
|
||||
payloadExpectedWithdrawals []*enginev1.Withdrawal
|
||||
|
||||
id uint64
|
||||
lock sync.RWMutex
|
||||
@@ -134,11 +136,13 @@ type beaconStateMarshalable struct {
|
||||
PendingConsolidations []*ethpb.PendingConsolidation `json:"pending_consolidations" yaml:"pending_consolidations"`
|
||||
ProposerLookahead []primitives.ValidatorIndex `json:"proposer_look_ahead" yaml:"proposer_look_ahead"`
|
||||
LatestExecutionPayloadBid *ethpb.ExecutionPayloadBid `json:"latest_execution_payload_bid" yaml:"latest_execution_payload_bid"`
|
||||
Builders []*ethpb.Builder `json:"builders" yaml:"builders"`
|
||||
NextWithdrawalBuilderIndex primitives.BuilderIndex `json:"next_withdrawal_builder_index" yaml:"next_withdrawal_builder_index"`
|
||||
ExecutionPayloadAvailability []byte `json:"execution_payload_availability" yaml:"execution_payload_availability"`
|
||||
BuilderPendingPayments []*ethpb.BuilderPendingPayment `json:"builder_pending_payments" yaml:"builder_pending_payments"`
|
||||
BuilderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal `json:"builder_pending_withdrawals" yaml:"builder_pending_withdrawals"`
|
||||
LatestBlockHash []byte `json:"latest_block_hash" yaml:"latest_block_hash"`
|
||||
LatestWithdrawalsRoot []byte `json:"latest_withdrawals_root" yaml:"latest_withdrawals_root"`
|
||||
PayloadExpectedWithdrawals []*enginev1.Withdrawal `json:"payload_expected_withdrawals" yaml:"payload_expected_withdrawals"`
|
||||
}
|
||||
|
||||
func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
@@ -194,11 +198,13 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: b.proposerLookahead,
|
||||
LatestExecutionPayloadBid: b.latestExecutionPayloadBid,
|
||||
Builders: b.builders,
|
||||
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailability,
|
||||
BuilderPendingPayments: b.builderPendingPayments,
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
|
||||
LatestBlockHash: b.latestBlockHash,
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
|
||||
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawals,
|
||||
}
|
||||
return json.Marshal(marshalable)
|
||||
}
|
||||
|
||||
@@ -56,9 +56,7 @@ func (r StateRoots) MarshalSSZTo(dst []byte) ([]byte, error) {
|
||||
func (r StateRoots) MarshalSSZ() ([]byte, error) {
|
||||
marshalled := make([]byte, fieldparams.StateRootsLength*32)
|
||||
for i, r32 := range r {
|
||||
for j, rr := range r32 {
|
||||
marshalled[i*32+j] = rr
|
||||
}
|
||||
copy(marshalled[i*32:(i+1)*32], r32[:])
|
||||
}
|
||||
return marshalled, nil
|
||||
}
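The marshalling change replaces the byte-by-byte inner loop with a single copy into the pre-sized buffer, which is simpler and lets the runtime move each 32-byte root in one step. A tiny sketch of the same layout on a two-element slice:

```go
package main

import "fmt"

func main() {
	roots := [][32]byte{{0x01}, {0x02}}

	// Same layout as StateRoots.MarshalSSZ: each 32-byte root copied into its
	// fixed slot of the flat output buffer.
	marshalled := make([]byte, len(roots)*32)
	for i, r := range roots {
		copy(marshalled[i*32:(i+1)*32], r[:])
	}
	fmt.Println(len(marshalled), marshalled[0], marshalled[32]) // 64 1 2
}
```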
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
state "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
)
|
||||
|
||||
// IsParentBlockFull returns true when the latest bid was fulfilled with a payload.
|
||||
func (b *BeaconState) IsParentBlockFull() bool {
|
||||
if b.version < version.Gloas {
|
||||
return false
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
if b.latestExecutionPayloadBid == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(b.latestExecutionPayloadBid.BlockHash, b.latestBlockHash)
|
||||
}
|
||||
|
||||
// BuilderPendingWithdrawals returns the builder pending withdrawals queue.
|
||||
func (b *BeaconState) BuilderPendingWithdrawals() ([]*ethpb.BuilderPendingWithdrawal, error) {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.builderPendingWithdrawalsVal(), nil
|
||||
}
|
||||
|
||||
// BuildersCount returns the number of builders in the registry.
|
||||
func (b *BeaconState) BuildersCount() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Builders returns the builders registry.
|
||||
func (b *BeaconState) Builders() ([]*state.Builder, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("Builders", b.version)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NextWithdrawalBuilderIndex returns the next builder index for the withdrawals sweep.
|
||||
func (b *BeaconState) NextWithdrawalBuilderIndex() (primitives.BuilderIndex, error) {
|
||||
if b.version < version.Gloas {
|
||||
return 0, errNotSupported("NextWithdrawalBuilderIndex", b.version)
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
@@ -305,10 +305,12 @@ func (b *BeaconState) ToProtoUnsafe() any {
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: lookahead,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailability,
|
||||
Builders: b.builders,
|
||||
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
|
||||
BuilderPendingPayments: b.builderPendingPayments,
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
|
||||
LatestBlockHash: b.latestBlockHash,
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
|
||||
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawals,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
@@ -607,10 +609,12 @@ func (b *BeaconState) ToProto() any {
|
||||
PendingConsolidations: b.pendingConsolidationsVal(),
|
||||
ProposerLookahead: lookahead,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailabilityVal(),
|
||||
Builders: b.buildersVal(),
|
||||
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
|
||||
BuilderPendingPayments: b.builderPendingPaymentsVal(),
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
|
||||
LatestBlockHash: b.latestBlockHashVal(),
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
|
||||
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawalsVal(),
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -47,6 +48,22 @@ func (b *BeaconState) builderPendingWithdrawalsVal() []*ethpb.BuilderPendingWith
|
||||
return withdrawals
|
||||
}
|
||||
|
||||
// buildersVal returns a copy of the builders registry.
|
||||
// This assumes that a lock is already held on BeaconState.
|
||||
func (b *BeaconState) buildersVal() []*ethpb.Builder {
|
||||
if b.builders == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
builders := make([]*ethpb.Builder, len(b.builders))
|
||||
for i := range builders {
|
||||
builder := b.builders[i]
|
||||
builders[i] = ethpb.CopyBuilder(builder)
|
||||
}
|
||||
|
||||
return builders
|
||||
}
|
||||
|
||||
// latestBlockHashVal returns a copy of the latest block hash.
|
||||
// This assumes that a lock is already held on BeaconState.
|
||||
func (b *BeaconState) latestBlockHashVal() []byte {
|
||||
@@ -60,15 +77,17 @@ func (b *BeaconState) latestBlockHashVal() []byte {
|
||||
return hash
|
||||
}
|
||||
|
||||
// latestWithdrawalsRootVal returns a copy of the latest withdrawals root.
|
||||
// payloadExpectedWithdrawalsVal returns a copy of the payload expected withdrawals.
|
||||
// This assumes that a lock is already held on BeaconState.
|
||||
func (b *BeaconState) latestWithdrawalsRootVal() []byte {
|
||||
if b.latestWithdrawalsRoot == nil {
|
||||
func (b *BeaconState) payloadExpectedWithdrawalsVal() []*enginev1.Withdrawal {
|
||||
if b.payloadExpectedWithdrawals == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
root := make([]byte, len(b.latestWithdrawalsRoot))
|
||||
copy(root, b.latestWithdrawalsRoot)
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(b.payloadExpectedWithdrawals))
|
||||
for i, withdrawal := range b.payloadExpectedWithdrawals {
|
||||
withdrawals[i] = withdrawal.Copy()
|
||||
}
|
||||
|
||||
return root
|
||||
return withdrawals
|
||||
}
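Both getters above follow the state package's copy-on-read convention: a nil slice stays nil, nil elements are preserved, and every non-nil element is deep-copied so callers cannot mutate data held behind the state lock. A self-contained sketch with a toy withdrawal type in place of the protobuf messages:

```go
package main

import "fmt"

type withdrawal struct {
	Index  uint64
	Amount uint64
}

func (w *withdrawal) Copy() *withdrawal {
	if w == nil {
		return nil
	}
	c := *w
	return &c
}

// copyWithdrawals mirrors payloadExpectedWithdrawalsVal: nil in, nil out,
// otherwise a new slice whose elements are themselves copies.
func copyWithdrawals(in []*withdrawal) []*withdrawal {
	if in == nil {
		return nil
	}
	out := make([]*withdrawal, len(in))
	for i, w := range in {
		out[i] = w.Copy()
	}
	return out
}

func main() {
	orig := []*withdrawal{{Index: 1, Amount: 4}, nil}
	cp := copyWithdrawals(orig)
	cp[0].Amount = 99
	fmt.Println(orig[0].Amount, cp[0].Amount) // 4 99: the state's copy is unaffected
}
```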
|
||||
|
||||
43
beacon-chain/state/state-native/gloas_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuildersVal(t *testing.T) {
|
||||
st := &BeaconState{}
|
||||
|
||||
require.Nil(t, st.buildersVal())
|
||||
|
||||
st.builders = []*ethpb.Builder{
|
||||
{Pubkey: []byte{0x01}, ExecutionAddress: []byte{0x02}, Balance: 3},
|
||||
nil,
|
||||
}
|
||||
|
||||
got := st.buildersVal()
|
||||
require.Len(t, got, 2)
|
||||
require.Nil(t, got[1])
|
||||
require.Equal(t, st.builders[0], got[0])
|
||||
require.NotSame(t, st.builders[0], got[0])
|
||||
}
|
||||
|
||||
func TestPayloadExpectedWithdrawalsVal(t *testing.T) {
|
||||
st := &BeaconState{}
|
||||
|
||||
require.Nil(t, st.payloadExpectedWithdrawalsVal())
|
||||
|
||||
st.payloadExpectedWithdrawals = []*enginev1.Withdrawal{
|
||||
{Index: 1, ValidatorIndex: 2, Address: []byte{0x03}, Amount: 4},
|
||||
nil,
|
||||
}
|
||||
|
||||
got := st.payloadExpectedWithdrawalsVal()
|
||||
require.Len(t, got, 2)
|
||||
require.Nil(t, got[1])
|
||||
require.Equal(t, st.payloadExpectedWithdrawals[0], got[0])
|
||||
require.NotSame(t, st.payloadExpectedWithdrawals[0], got[0])
|
||||
}
|
||||
@@ -342,6 +342,15 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
}
|
||||
|
||||
if state.version >= version.Gloas {
|
||||
buildersRoot, err := stateutil.BuildersRoot(state.builders)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute builders merkleization")
|
||||
}
|
||||
fieldRoots[types.Builders.RealPosition()] = buildersRoot[:]
|
||||
|
||||
nextWithdrawalBuilderIndexRoot := ssz.Uint64Root(uint64(state.nextWithdrawalBuilderIndex))
|
||||
fieldRoots[types.NextWithdrawalBuilderIndex.RealPosition()] = nextWithdrawalBuilderIndexRoot[:]
|
||||
|
||||
epaRoot, err := stateutil.ExecutionPayloadAvailabilityRoot(state.executionPayloadAvailability)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute execution payload availability merkleization")
|
||||
@@ -366,8 +375,12 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
lbhRoot := bytesutil.ToBytes32(state.latestBlockHash)
|
||||
fieldRoots[types.LatestBlockHash.RealPosition()] = lbhRoot[:]
|
||||
|
||||
lwrRoot := bytesutil.ToBytes32(state.latestWithdrawalsRoot)
|
||||
fieldRoots[types.LatestWithdrawalsRoot.RealPosition()] = lwrRoot[:]
|
||||
expectedWithdrawalsRoot, err := ssz.WithdrawalSliceRoot(state.payloadExpectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute payload expected withdrawals root")
|
||||
}
|
||||
|
||||
fieldRoots[types.PayloadExpectedWithdrawals.RealPosition()] = expectedWithdrawalsRoot[:]
|
||||
}
|
||||
return fieldRoots, nil
|
||||
}
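The new Gloas field roots lean on small SSZ helpers; for example, the next-withdrawal builder index is hashed with ssz.Uint64Root, which is the little-endian encoding right-padded to a 32-byte chunk. A sketch of what that helper computes (the helper name mirrors Prysm's, the implementation here is illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// uint64Root mirrors what ssz.Uint64Root computes: SSZ basic types are
// little-endian encoded and zero-padded to a 32-byte chunk.
func uint64Root(v uint64) [32]byte {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], v)
	return root
}

func main() {
	r := uint64Root(5) // e.g. nextWithdrawalBuilderIndex = 5
	fmt.Printf("%x\n", r)
}
```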
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/container/trie"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
@@ -39,42 +40,12 @@ func (b *BeaconState) NextSyncCommitteeGeneralizedIndex() (uint64, error) {
|
||||
|
||||
// CurrentSyncCommitteeProof from the state's Merkle trie representation.
|
||||
func (b *BeaconState) CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version == version.Phase0 {
|
||||
return nil, errNotSupported("CurrentSyncCommitteeProof", b.version)
|
||||
}
|
||||
|
||||
// In case the Merkle layers of the trie are not populated, we need
|
||||
// to perform some initialization.
|
||||
if err := b.initializeMerkleLayers(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Our beacon state uses a "dirty" fields pattern which requires us to
|
||||
// recompute branches of the Merkle layers that are marked as dirty.
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return trie.ProofFromMerkleLayers(b.merkleLayers, types.CurrentSyncCommittee.RealPosition()), nil
|
||||
return b.ProofByFieldIndex(ctx, types.CurrentSyncCommittee)
|
||||
}
|
||||
|
||||
// NextSyncCommitteeProof from the state's Merkle trie representation.
|
||||
func (b *BeaconState) NextSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version == version.Phase0 {
|
||||
return nil, errNotSupported("NextSyncCommitteeProof", b.version)
|
||||
}
|
||||
|
||||
if err := b.initializeMerkleLayers(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return trie.ProofFromMerkleLayers(b.merkleLayers, types.NextSyncCommittee.RealPosition()), nil
|
||||
return b.ProofByFieldIndex(ctx, types.NextSyncCommittee)
|
||||
}
|
||||
|
||||
// FinalizedRootProof crafts a Merkle proof for the finalized root
|
||||
@@ -83,8 +54,37 @@ func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error)
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version == version.Phase0 {
|
||||
return nil, errNotSupported("FinalizedRootProof", b.version)
|
||||
branchProof, err := b.proofByFieldIndex(ctx, types.FinalizedCheckpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The epoch field of a finalized checkpoint is the neighbor
|
||||
// index of the finalized root field in its Merkle tree representation
|
||||
// of the checkpoint. This neighbor is the first element added to the proof.
|
||||
epochBuf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(epochBuf, uint64(b.finalizedCheckpointVal().Epoch))
|
||||
epochRoot := bytesutil.ToBytes32(epochBuf)
|
||||
proof := make([][]byte, 0)
|
||||
proof = append(proof, epochRoot[:])
|
||||
proof = append(proof, branchProof...)
|
||||
return proof, nil
|
||||
}
|
||||
|
||||
// ProofByFieldIndex constructs proofs for given field index with lock acquisition.
|
||||
func (b *BeaconState) ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
return b.proofByFieldIndex(ctx, f)
|
||||
}
|
||||
|
||||
// proofByFieldIndex constructs proofs for given field index.
|
||||
// Important: it is assumed that beacon state mutex is locked when calling this method.
|
||||
func (b *BeaconState) proofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
|
||||
err := b.validateFieldIndex(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := b.initializeMerkleLayers(ctx); err != nil {
|
||||
@@ -93,16 +93,40 @@ func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error)
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpt := b.finalizedCheckpointVal()
|
||||
// The epoch field of a finalized checkpoint is the neighbor
|
||||
// index of the finalized root field in its Merkle tree representation
|
||||
// of the checkpoint. This neighbor is the first element added to the proof.
|
||||
epochBuf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(epochBuf, uint64(cpt.Epoch))
|
||||
epochRoot := bytesutil.ToBytes32(epochBuf)
|
||||
proof := make([][]byte, 0)
|
||||
proof = append(proof, epochRoot[:])
|
||||
branch := trie.ProofFromMerkleLayers(b.merkleLayers, types.FinalizedCheckpoint.RealPosition())
|
||||
proof = append(proof, branch...)
|
||||
return proof, nil
|
||||
return trie.ProofFromMerkleLayers(b.merkleLayers, f.RealPosition()), nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) validateFieldIndex(f types.FieldIndex) error {
|
||||
switch b.version {
|
||||
case version.Phase0:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Altair:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateAltairFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Bellatrix:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateBellatrixFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Capella:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateCapellaFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Deneb:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateDenebFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Electra:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateElectraFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
case version.Fulu:
|
||||
if f.RealPosition() > params.BeaconConfig().BeaconStateFuluFieldCount-1 {
|
||||
return errNotSupported(f.String(), b.version)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
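FinalizedRootProof builds on the generic per-field proof: because a checkpoint container is (epoch, root), the epoch leaf is the Merkle sibling of the finalized root, so it is prepended to the branch returned by proofByFieldIndex. A self-contained sketch of that sibling relationship using sha256 directly; the remaining proof elements above the checkpoint come from the per-field branch and are omitted here:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func main() {
	// A Checkpoint is an SSZ container of (epoch uint64, root [32]byte), so its
	// hash tree root is sha256(epochLeaf || rootLeaf). The epoch leaf is
	// therefore the Merkle sibling of the finalized root.
	epoch := uint64(7)
	var finalizedRoot [32]byte
	finalizedRoot[0] = 0xaa

	var epochLeaf [32]byte
	binary.LittleEndian.PutUint64(epochLeaf[:8], epoch)

	checkpointRoot := sha256.Sum256(append(epochLeaf[:], finalizedRoot[:]...))

	// A verifier holding only the finalized root plus the first proof element
	// (the epoch leaf) can recompute the checkpoint root and continue upward.
	recomputed := sha256.Sum256(append(epochLeaf[:], finalizedRoot[:]...))
	fmt.Println(recomputed == checkpointRoot) // true
}
```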
|
||||
|
||||
@@ -21,10 +21,6 @@ func TestBeaconStateMerkleProofs_phase0_notsupported(t *testing.T) {
|
||||
_, err := st.NextSyncCommitteeProof(ctx)
|
||||
require.ErrorContains(t, "not supported", err)
|
||||
})
|
||||
t.Run("finalized root", func(t *testing.T) {
|
||||
_, err := st.FinalizedRootProof(ctx)
|
||||
require.ErrorContains(t, "not supported", err)
|
||||
})
|
||||
}
|
||||
func TestBeaconStateMerkleProofs_altair(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
)
|
||||
|
||||
// SetPayloadExpectedWithdrawals updates the cached withdrawals root for the next payload.
|
||||
func (b *BeaconState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("SetPayloadExpectedWithdrawals", b.version)
|
||||
}
|
||||
|
||||
if withdrawals == nil {
|
||||
return errors.New("cannot set nil payload expected withdrawals")
|
||||
}
|
||||
|
||||
root, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
rootBytes := make([]byte, len(root))
|
||||
copy(rootBytes, root[:])
|
||||
b.latestWithdrawalsRoot = rootBytes
|
||||
b.markFieldAsDirty(types.LatestWithdrawalsRoot)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DequeueBuilderPendingWithdrawals removes processed builder withdrawals from the front of the queue.
|
||||
func (b *BeaconState) DequeueBuilderPendingWithdrawals(n uint64) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("DequeueBuilderPendingWithdrawals", b.version)
|
||||
}
|
||||
|
||||
if n > uint64(len(b.builderPendingWithdrawals)) {
|
||||
return errors.New("cannot dequeue more builder withdrawals than are in the queue")
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
|
||||
withdrawals := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
|
||||
copy(withdrawals, b.builderPendingWithdrawals)
|
||||
b.builderPendingWithdrawals = withdrawals
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.builderPendingWithdrawals = b.builderPendingWithdrawals[n:]
|
||||
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
|
||||
b.rebuildTrie[types.BuilderPendingWithdrawals] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNextWithdrawalBuilderIndex sets the next builder index for the withdrawals sweep.
|
||||
func (b *BeaconState) SetNextWithdrawalBuilderIndex(_ primitives.BuilderIndex) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("SetNextWithdrawalBuilderIndex", b.version)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
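DequeueBuilderPendingWithdrawals, shown in the removed hunk above, follows the copy-on-write discipline used throughout the native state: if another state copy still shares the backing slice, clone it before re-slicing so the sibling copy is untouched. A toy sketch of that discipline, with a simplified reference counter in place of stateutil's shared field references:

```go
package main

import "fmt"

// ref is a toy stand-in for stateutil's shared field reference counter.
type ref struct{ n int }

func (r *ref) Refs() int { return r.n }
func (r *ref) MinusRef() { r.n-- }

type queueState struct {
	withdrawals []int
	refs        *ref
}

// dequeue mirrors DequeueBuilderPendingWithdrawals: if another state copy
// still shares the backing slice, clone it first so the sibling is untouched,
// then drop the first n entries.
func (s *queueState) dequeue(n int) {
	if s.refs.Refs() > 1 {
		cloned := make([]int, len(s.withdrawals))
		copy(cloned, s.withdrawals)
		s.withdrawals = cloned
		s.refs.MinusRef()
		s.refs = &ref{n: 1}
	}
	s.withdrawals = s.withdrawals[n:]
}

func main() {
	shared := &ref{n: 2}
	a := &queueState{withdrawals: []int{1, 2, 3}, refs: shared}
	b := &queueState{withdrawals: a.withdrawals, refs: shared}

	a.dequeue(2)
	fmt.Println(a.withdrawals, b.withdrawals) // [3] [1 2 3]
}
```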
|
||||
@@ -120,11 +120,13 @@ var (
|
||||
)
|
||||
|
||||
gloasAdditionalFields = []types.FieldIndex{
|
||||
types.Builders,
|
||||
types.NextWithdrawalBuilderIndex,
|
||||
types.ExecutionPayloadAvailability,
|
||||
types.BuilderPendingPayments,
|
||||
types.BuilderPendingWithdrawals,
|
||||
types.LatestBlockHash,
|
||||
types.LatestWithdrawalsRoot,
|
||||
types.PayloadExpectedWithdrawals,
|
||||
}
|
||||
|
||||
gloasFields = slices.Concat(
|
||||
@@ -145,7 +147,7 @@ const (
|
||||
denebSharedFieldRefCount = 7
|
||||
electraSharedFieldRefCount = 10
|
||||
fuluSharedFieldRefCount = 11
|
||||
gloasSharedFieldRefCount = 12 // Adds PendingBuilderWithdrawal to the shared-ref set and LatestExecutionPayloadHeader is removed
|
||||
gloasSharedFieldRefCount = 13 // Adds Builders and BuilderPendingWithdrawals to the shared-ref set; LatestExecutionPayloadHeader is removed.
|
||||
)
|
||||
|
||||
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
|
||||
@@ -817,11 +819,13 @@ func InitializeFromProtoUnsafeGloas(st *ethpb.BeaconStateGloas) (state.BeaconSta
|
||||
pendingConsolidations: st.PendingConsolidations,
|
||||
proposerLookahead: proposerLookahead,
|
||||
latestExecutionPayloadBid: st.LatestExecutionPayloadBid,
|
||||
builders: st.Builders,
|
||||
nextWithdrawalBuilderIndex: st.NextWithdrawalBuilderIndex,
|
||||
executionPayloadAvailability: st.ExecutionPayloadAvailability,
|
||||
builderPendingPayments: st.BuilderPendingPayments,
|
||||
builderPendingWithdrawals: st.BuilderPendingWithdrawals,
|
||||
latestBlockHash: st.LatestBlockHash,
|
||||
latestWithdrawalsRoot: st.LatestWithdrawalsRoot,
|
||||
payloadExpectedWithdrawals: st.PayloadExpectedWithdrawals,
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
@@ -861,6 +865,7 @@ func InitializeFromProtoUnsafeGloas(st *ethpb.BeaconStateGloas) (state.BeaconSta
|
||||
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1) // New in Gloas.
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1) // New in Gloas.
|
||||
|
||||
state.Count.Inc()
|
||||
@@ -932,6 +937,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
pendingDeposits: b.pendingDeposits,
|
||||
pendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
pendingConsolidations: b.pendingConsolidations,
|
||||
builders: b.builders,
|
||||
|
||||
// Everything else, too small to be concerned about, constant size.
|
||||
genesisValidatorsRoot: b.genesisValidatorsRoot,
|
||||
@@ -948,11 +954,12 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella.Copy(),
|
||||
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDeneb.Copy(),
|
||||
latestExecutionPayloadBid: b.latestExecutionPayloadBid.Copy(),
|
||||
nextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
|
||||
executionPayloadAvailability: b.executionPayloadAvailabilityVal(),
|
||||
builderPendingPayments: b.builderPendingPaymentsVal(),
|
||||
builderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
|
||||
latestBlockHash: b.latestBlockHashVal(),
|
||||
latestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
|
||||
payloadExpectedWithdrawals: b.payloadExpectedWithdrawalsVal(),
|
||||
|
||||
id: types.Enumerator.Inc(),
|
||||
|
||||
@@ -1328,6 +1335,10 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return stateutil.ProposerLookaheadRoot(b.proposerLookahead)
|
||||
case types.LatestExecutionPayloadBid:
|
||||
return b.latestExecutionPayloadBid.HashTreeRoot()
|
||||
case types.Builders:
|
||||
return stateutil.BuildersRoot(b.builders)
|
||||
case types.NextWithdrawalBuilderIndex:
|
||||
return ssz.Uint64Root(uint64(b.nextWithdrawalBuilderIndex)), nil
|
||||
case types.ExecutionPayloadAvailability:
|
||||
return stateutil.ExecutionPayloadAvailabilityRoot(b.executionPayloadAvailability)
|
||||
|
||||
@@ -1337,8 +1348,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return stateutil.BuilderPendingWithdrawalsRoot(b.builderPendingWithdrawals)
|
||||
case types.LatestBlockHash:
|
||||
return bytesutil.ToBytes32(b.latestBlockHash), nil
|
||||
case types.LatestWithdrawalsRoot:
|
||||
return bytesutil.ToBytes32(b.latestWithdrawalsRoot), nil
|
||||
case types.PayloadExpectedWithdrawals:
|
||||
return ssz.WithdrawalSliceRoot(b.payloadExpectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
}
|
||||
return [32]byte{}, errors.New("invalid field index provided")
|
||||
}
|
||||
|
||||
@@ -116,6 +116,10 @@ func (f FieldIndex) String() string {
|
||||
return "pendingConsolidations"
|
||||
case ProposerLookahead:
|
||||
return "proposerLookahead"
|
||||
case Builders:
|
||||
return "builders"
|
||||
case NextWithdrawalBuilderIndex:
|
||||
return "nextWithdrawalBuilderIndex"
|
||||
case ExecutionPayloadAvailability:
|
||||
return "executionPayloadAvailability"
|
||||
case BuilderPendingPayments:
|
||||
@@ -124,8 +128,8 @@ func (f FieldIndex) String() string {
|
||||
return "builderPendingWithdrawals"
|
||||
case LatestBlockHash:
|
||||
return "latestBlockHash"
|
||||
case LatestWithdrawalsRoot:
|
||||
return "latestWithdrawalsRoot"
|
||||
case PayloadExpectedWithdrawals:
|
||||
return "payloadExpectedWithdrawals"
|
||||
default:
|
||||
return fmt.Sprintf("unknown field index number: %d", f)
|
||||
}
|
||||
@@ -211,16 +215,20 @@ func (f FieldIndex) RealPosition() int {
|
||||
return 36
|
||||
case ProposerLookahead:
|
||||
return 37
|
||||
case ExecutionPayloadAvailability:
|
||||
case Builders:
|
||||
return 38
|
||||
case BuilderPendingPayments:
|
||||
case NextWithdrawalBuilderIndex:
|
||||
return 39
|
||||
case BuilderPendingWithdrawals:
|
||||
case ExecutionPayloadAvailability:
|
||||
return 40
|
||||
case LatestBlockHash:
|
||||
case BuilderPendingPayments:
|
||||
return 41
|
||||
case LatestWithdrawalsRoot:
|
||||
case BuilderPendingWithdrawals:
|
||||
return 42
|
||||
case LatestBlockHash:
|
||||
return 43
|
||||
case PayloadExpectedWithdrawals:
|
||||
return 44
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
@@ -287,11 +295,13 @@ const (
|
||||
PendingPartialWithdrawals // Electra: EIP-7251
|
||||
PendingConsolidations // Electra: EIP-7251
|
||||
ProposerLookahead // Fulu: EIP-7917
|
||||
Builders // Gloas: EIP-7732
|
||||
NextWithdrawalBuilderIndex // Gloas: EIP-7732
|
||||
ExecutionPayloadAvailability // Gloas: EIP-7732
|
||||
BuilderPendingPayments // Gloas: EIP-7732
|
||||
BuilderPendingWithdrawals // Gloas: EIP-7732
|
||||
LatestBlockHash // Gloas: EIP-7732
|
||||
LatestWithdrawalsRoot // Gloas: EIP-7732
|
||||
PayloadExpectedWithdrawals // Gloas: EIP-7732
|
||||
)
|
||||
|
||||
// Enumerator keeps track of the number of states created since the node's start.
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"block_header_root.go",
|
||||
"builder_pending_payments_root.go",
|
||||
"builder_pending_withdrawals_root.go",
|
||||
"builders_root.go",
|
||||
"eth1_root.go",
|
||||
"execution_payload_availability_root.go",
|
||||
"field_root_attestation.go",
|
||||
|
||||
12
beacon-chain/state/stateutil/builders_root.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// BuildersRoot computes the SSZ root of a slice of Builder.
|
||||
func BuildersRoot(slice []*ethpb.Builder) ([32]byte, error) {
|
||||
return ssz.SliceRoot(slice, uint64(fieldparams.BuilderRegistryLimit))
|
||||
}
|
||||
@@ -70,7 +70,6 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
|
||||
topic := "/eth2/%x/beacon_block"
|
||||
go r.startDiscoveryAndSubscriptions()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
var vr [32]byte
|
||||
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
@@ -83,9 +82,11 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
msg.Block.ParentRoot = util.Random32Bytes(t)
|
||||
msg.Signature = sk.Sign([]byte("data")).Marshal()
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
// wait for chainstart to be sent
|
||||
time.Sleep(400 * time.Millisecond)
|
||||
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
|
||||
|
||||
// Wait for chainstart event to be processed
|
||||
require.Eventually(t, func() bool {
|
||||
return r.chainStarted.IsSet()
|
||||
}, 5*time.Second, 50*time.Millisecond, "Did not receive chain start event.")
|
||||
}
|
||||
|
||||
func TestSyncHandlers_WaitForChainStart(t *testing.T) {
|
||||
@@ -217,20 +218,18 @@ func TestSyncService_StopCleanly(t *testing.T) {
|
||||
p2p.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
// wait for chainstart to be sent
|
||||
time.Sleep(2 * time.Second)
|
||||
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
|
||||
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
|
||||
// Wait for chainstart and topics to be registered
|
||||
require.Eventually(t, func() bool {
|
||||
return r.chainStarted.IsSet() && len(r.cfg.p2p.PubSub().GetTopics()) > 0 && len(r.cfg.p2p.Host().Mux().Protocols()) > 0
|
||||
}, 5*time.Second, 50*time.Millisecond, "Did not receive chain start event or topics not registered.")
|
||||
|
||||
// Both pubsub and rpc topics should be unsubscribed.
|
||||
require.NoError(t, r.Stop())
|
||||
|
||||
// Sleep to allow pubsub topics to be deregistered.
|
||||
time.Sleep(1 * time.Second)
|
||||
require.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
require.Equal(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
|
||||
// Wait for pubsub topics to be deregistered.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(r.cfg.p2p.PubSub().GetTopics()) == 0 && len(r.cfg.p2p.Host().Mux().Protocols()) == 0
|
||||
}, 5*time.Second, 50*time.Millisecond, "Pubsub topics were not deregistered")
|
||||
}
|
||||
|
||||
func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
|
||||
|
||||
@@ -614,11 +614,10 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers.
|
||||
if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Fatal("Validated status is true")
|
||||
}
|
||||
require.Eventually(t, func() bool {
|
||||
res, _ := r.validateAggregateAndProof(t.Context(), "", msg)
|
||||
return res != pubsub.ValidationAccept
|
||||
}, time.Second, 10*time.Millisecond, "Expected validation to reject duplicate aggregate")
|
||||
}
|
||||
|
||||
func TestValidateAggregateAndProof_BadBlock(t *testing.T) {
|
||||
|
||||
@@ -992,7 +992,6 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
|
||||
|
||||
// Mark the proposer/slot as seen
|
||||
r.setSeenBlockIndexSlot(msg.Block.Slot, msg.Block.ProposerIndex)
|
||||
time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers
|
||||
|
||||
// Prepare and validate the second message (clone)
|
||||
buf := new(bytes.Buffer)
|
||||
@@ -1010,9 +1009,11 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
|
||||
}
|
||||
|
||||
// Since this is not an equivocation (same signature), it should be ignored
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pubsub.ValidationIgnore, res, "block with same signature should be ignored")
|
||||
// Wait for the cached value to propagate through buffers
|
||||
require.Eventually(t, func() bool {
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
return err == nil && res == pubsub.ValidationIgnore
|
||||
}, time.Second, 10*time.Millisecond, "block with same signature should be ignored")
|
||||
|
||||
// Verify no slashings were created
|
||||
assert.Equal(t, 0, len(slashingPool.PendingPropSlashings), "Expected no slashings for same signature")
|
||||
|
||||
5
changelog/bastin_ephemeral-debug-logfile.md
Normal file
@@ -0,0 +1,5 @@
|
||||
### Added
|
||||
|
||||
- Added an ephemeral debug logfile for beacon and validator nodes that captures debug-level logs for 24 hours. It
also keeps 1 backup in case of size-based rotation. The logfiles are stored in `datadir/logs/`. This feature is
enabled by default and can be disabled by setting the `--disable-ephemeral-log-file` flag.
|
||||
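A minimal sketch of the retention settings this entry describes, assuming the lumberjack.v2 rotating writer used by the ConfigureEphemeralLogFile implementation later in this diff; the filename here is illustrative.

package main

import (
	"log"

	lumberjack "gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Keep roughly 24 hours of debug logs plus one backup on size-based
	// rotation, mirroring the settings shown later in this diff.
	w := &lumberjack.Logger{
		Filename:   "datadir/logs/beacon-chain.log",
		MaxSize:    250, // MB before a size-based rotation
		MaxBackups: 1,   // one rotated backup is kept
		MaxAge:     1,   // days before old files are removed
	}
	log.SetOutput(w)
	log.Println("ephemeral debug log sketch")
}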
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Fix the missing fork version object mapping for Fulu in light client p2p.
|
||||
4
changelog/bastin_verbosity-per-hook.md
Normal file
@@ -0,0 +1,4 @@
|
||||
### Changed
|
||||
|
||||
- Moved verbosity settings to be configurable per hook, rather than just globally. This allows us to control the
verbosity of each output independently.
|
||||
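A small sketch of the per-hook verbosity idea, relying only on the logrus level ordering (PanicLevel through TraceLevel); the actual WriterHook.AllowedLevels wiring appears later in this diff.

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// levelsUpTo mirrors the AllowedLevels slicing used later in this diff:
// logrus.AllLevels is ordered from PanicLevel (0) to TraceLevel (6), so
// slicing at lvl+1 keeps every level at or above the requested verbosity.
func levelsUpTo(lvl logrus.Level) []logrus.Level {
	return logrus.AllLevels[:lvl+1]
}

func main() {
	fmt.Println(levelsUpTo(logrus.InfoLevel)) // [panic fatal error warning info]
}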
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.
|
||||
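An illustrative sketch of what an SSZ uint64 wrapper provides; the real primitives.BuilderIndex lives in consensus-types/primitives and its exact method set is not shown in this diff.

package main

import (
	"encoding/binary"
	"fmt"
)

// BuilderIndex is a stand-in for the primitives.BuilderIndex type.
type BuilderIndex uint64

// HashTreeRoot returns the SSZ hash tree root of a uint64: its little-endian
// serialization right-padded with zeros to 32 bytes.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], uint64(b))
	return root, nil
}

func main() {
	root, _ := BuilderIndex(7).HashTreeRoot()
	fmt.Printf("%x\n", root)
}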
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also broadcasts immediately, matching the gRPC behavior.
|
||||
3
changelog/james-prysm_is-ready.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Changed the IsHealthy check to IsReady for the validator client's interpretation of /eth/v1/node/health; a 206 response now returns false because the node is still syncing.
|
||||
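A hedged sketch of the interpretation described above; the validator-client code itself is not part of this diff, so the function name and shape are assumptions.

package main

import "fmt"

// isNodeReady interprets /eth/v1/node/health status codes: 200 means ready,
// 206 means the node is still syncing (not ready), and 503 means not ready.
func isNodeReady(statusCode int) bool {
	return statusCode == 200
}

func main() {
	fmt.Println(isNodeReady(200), isNodeReady(206), isNodeReady(503)) // true false false
}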
3
changelog/jrhea_duplicate_tracer_provider_setting.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Fixed
|
||||
|
||||
- Don't call trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize) twice.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Pending aggregates: When multiple aggregated attestations that differ only by the aggregator index are in the pending queue, only process one of them.
|
||||
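A purely illustrative sketch of the deduplication idea described above, not Prysm's actual pending queue: aggregates are keyed by everything except the aggregator index, so only the first copy of each unique aggregate is processed.

package main

import "fmt"

// aggKey identifies an aggregate by its attestation data root and its
// aggregation bits, deliberately ignoring the aggregator index.
type aggKey struct {
	dataRoot [32]byte
	bitsRoot [32]byte
}

// shouldProcess returns true only for the first aggregate with a given key.
func shouldProcess(seen map[aggKey]bool, dataRoot, bitsRoot [32]byte) bool {
	k := aggKey{dataRoot: dataRoot, bitsRoot: bitsRoot}
	if seen[k] {
		return false
	}
	seen[k] = true
	return true
}

func main() {
	seen := make(map[aggKey]bool)
	var d, b [32]byte
	fmt.Println(shouldProcess(seen, d, b), shouldProcess(seen, d, b)) // true false
}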
@@ -1,2 +0,0 @@
|
||||
### Changed
|
||||
- `validateDataColumn`: Remove error logs.
|
||||
2
changelog/potuz_low_valcount.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Added
|
||||
- Add a feature flag to pass spectests with low validator count.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Do not process slots and copy states for next epoch proposers after Fulu.
|
||||
3
changelog/potuz_preprocess_block_before_processing.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Add feature flag `--enable-proposer-preprocessing` to process the block and verify signatures before proposing.
|
||||
2
changelog/potuz_update_spectests.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Added
|
||||
- Update spectests to v1.7.0-alpha.0
|
||||
3
changelog/pvl-changelog-v7.1.2.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Ignored
|
||||
|
||||
- Updated changelog for v7.1.2
|
||||
3
changelog/pvl_fix-flaky-tests-polling.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Replaced `time.Sleep` with `require.Eventually` polling in tests to fix flaky behavior caused by race conditions between goroutines and assertions.
|
||||
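A minimal sketch of the polling pattern this entry describes, using the same require.Eventually call shape that appears in the test diffs above; the condition being polled is a stand-in.

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestEventuallyPattern(t *testing.T) {
	var started atomic.Bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		started.Store(true)
	}()

	// Poll the condition instead of sleeping for a fixed duration.
	require.Eventually(t, func() bool {
		return started.Load()
	}, 5*time.Second, 50*time.Millisecond, "condition was never met")
}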
3
changelog/satushh-opt.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Performance improvement in state (MarshalSSZTo): use copy() instead of an unnecessary byte-by-byte loop.
|
||||
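An illustrative micro-example of the change this entry describes, not the generated MarshalSSZTo code itself: replacing a manual byte loop with copy().

package main

import "fmt"

func main() {
	src := []byte{1, 2, 3, 4}
	dst := make([]byte, len(src))

	// Before: byte-by-byte loop.
	for i := range src {
		dst[i] = src[i]
	}

	// After: a single copy call, which the runtime implements as an
	// optimized memmove.
	copy(dst, src)

	fmt.Println(dst) // [1 2 3 4]
}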
3
changelog/syjn99_generalize-proof-generation.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Add `ProofByFieldIndex` to generalize merkle proof generation for `BeaconState`.
|
||||
3
changelog/t_gloas-builders-registry.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Added basic Gloas builder support (`Builder` message and `BeaconStateGloas` `builders`/`next_withdrawal_builder_index` fields)
|
||||
@@ -356,6 +356,12 @@ var (
|
||||
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
|
||||
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
|
||||
}
|
||||
// DisableEphemeralLogFile disables the 24 hour debug log file.
|
||||
DisableEphemeralLogFile = &cli.BoolFlag{
|
||||
Name: "disable-ephemeral-log-file",
|
||||
Usage: "Disables the creation of a debug log file that keeps 24 hours of logs.",
|
||||
Value: false,
|
||||
}
|
||||
// DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
|
||||
DisableGetBlobsV2 = &cli.BoolFlag{
|
||||
Name: "disable-get-blobs-v2",
|
||||
|
||||
@@ -158,6 +158,7 @@ var appFlags = []cli.Flag{
|
||||
dasFlags.BackfillOldestSlot,
|
||||
dasFlags.BlobRetentionEpochFlag,
|
||||
flags.BatchVerifierLimit,
|
||||
flags.DisableEphemeralLogFile,
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -170,8 +171,15 @@ func before(ctx *cli.Context) error {
|
||||
return errors.Wrap(err, "failed to load flags from config file")
|
||||
}
|
||||
|
||||
format := ctx.String(cmd.LogFormat.Name)
|
||||
// determine log verbosity
|
||||
verbosity := ctx.String(cmd.VerbosityFlag.Name)
|
||||
verbosityLevel, err := logrus.ParseLevel(verbosity)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log verbosity")
|
||||
}
|
||||
logs.SetLoggingLevel(verbosityLevel)
|
||||
|
||||
format := ctx.String(cmd.LogFormat.Name)
|
||||
switch format {
|
||||
case "text":
|
||||
// disabling logrus default output so we can control it via different hooks
|
||||
@@ -185,8 +193,9 @@ func before(ctx *cli.Context) error {
|
||||
formatter.ForceColors = true
|
||||
|
||||
logrus.AddHook(&logs.WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
|
||||
})
|
||||
case "fluentd":
|
||||
f := joonix.NewFormatter()
|
||||
@@ -210,11 +219,17 @@ func before(ctx *cli.Context) error {
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
if !ctx.Bool(flags.DisableEphemeralLogFile.Name) {
|
||||
if err := logs.ConfigureEphemeralLogFile(ctx.String(cmd.DataDirFlag.Name), ctx.App.Name); err != nil {
|
||||
log.WithError(err).Error("Failed to configure debug log file")
|
||||
}
|
||||
}
|
||||
|
||||
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
|
||||
return errors.Wrap(err, "failed to expand single endpoint")
|
||||
}
|
||||
@@ -291,7 +306,7 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.SetLevel(level)
|
||||
|
||||
// Set libp2p logger to only panic logs for the info level.
|
||||
golog.SetAllLoggers(golog.LevelPanic)
|
||||
|
||||
|
||||
@@ -200,6 +200,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.LogFormat,
|
||||
cmd.LogFileName,
|
||||
cmd.VerbosityFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
},
|
||||
},
|
||||
{ // Feature flags.
|
||||
|
||||
@@ -86,7 +86,7 @@ func main() {
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -410,6 +410,12 @@ var (
|
||||
Usage: "Maximum number of health checks to perform before exiting if not healthy. Set to 0 or a negative number for indefinite checks.",
|
||||
Value: DefaultMaxHealthChecks,
|
||||
}
|
||||
// DisableEphemeralLogFile disables the 24 hour debug log file.
|
||||
DisableEphemeralLogFile = &cli.BoolFlag{
|
||||
Name: "disable-ephemeral-log-file",
|
||||
Usage: "Disables the creation of a debug log file that keeps 24 hours of logs.",
|
||||
Value: false,
|
||||
}
|
||||
)
|
||||
|
||||
// DefaultValidatorDir returns OS-specific default validator directory.
|
||||
|
||||
@@ -115,6 +115,7 @@ var appFlags = []cli.Flag{
|
||||
debug.BlockProfileRateFlag,
|
||||
debug.MutexProfileFractionFlag,
|
||||
cmd.AcceptTosFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -147,6 +148,14 @@ func main() {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine log verbosity
|
||||
verbosity := ctx.String(cmd.VerbosityFlag.Name)
|
||||
verbosityLevel, err := logrus.ParseLevel(verbosity)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log verbosity")
|
||||
}
|
||||
logs.SetLoggingLevel(verbosityLevel)
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
|
||||
format := ctx.String(cmd.LogFormat.Name)
|
||||
@@ -163,8 +172,9 @@ func main() {
|
||||
formatter.ForceColors = true
|
||||
|
||||
logrus.AddHook(&logs.WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
|
||||
})
|
||||
case "fluentd":
|
||||
f := joonix.NewFormatter()
|
||||
@@ -185,11 +195,17 @@ func main() {
|
||||
}
|
||||
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
if !ctx.Bool(flags.DisableEphemeralLogFile.Name) {
|
||||
if err := logs.ConfigureEphemeralLogFile(ctx.String(cmd.DataDirFlag.Name), ctx.App.Name); err != nil {
|
||||
log.WithError(err).Error("Failed to configure debug log file")
|
||||
}
|
||||
}
|
||||
|
||||
// Fix data dir for Windows users.
|
||||
outdatedDataDir := filepath.Join(file.HomeDir(), "AppData", "Roaming", "Eth2Validators")
|
||||
currentDataDir := flags.DefaultValidatorDir()
|
||||
|
||||
@@ -75,6 +75,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.GrpcMaxCallRecvMsgSizeFlag,
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -71,6 +71,7 @@ type Flags struct {
|
||||
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
|
||||
|
||||
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
|
||||
EnableProposerPreprocessing bool // EnableProposerPreprocessing enables proposer pre-processing of blocks before proposing.
|
||||
|
||||
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
|
||||
// BlobSaveFsync requires blob saving to block on fsync to ensure blobs are durably persisted before passing DA.
|
||||
@@ -80,6 +81,7 @@ type Flags struct {
|
||||
SaveInvalidBlob bool // SaveInvalidBlob saves invalid blob to temp.
|
||||
|
||||
EnableDiscoveryReboot bool // EnableDiscoveryReboot allows the node to have its local listener to be rebooted in the event of discovery issues.
|
||||
LowValcountSweep bool // LowValcountSweep bounds withdrawal sweep by validator count.
|
||||
|
||||
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
|
||||
// changed on disk. This feature is for advanced use cases only.
|
||||
@@ -241,6 +243,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(disableVerboseSigVerification)
|
||||
cfg.EnableVerboseSigVerification = false
|
||||
}
|
||||
cfg.EnableProposerPreprocessing = ctx.Bool(enableProposerPreprocessing.Name)
|
||||
if cfg.EnableProposerPreprocessing {
|
||||
logEnabled(enableProposerPreprocessing)
|
||||
}
|
||||
if ctx.IsSet(prepareAllPayloads.Name) {
|
||||
logEnabled(prepareAllPayloads)
|
||||
cfg.PrepareAllPayloads = true
|
||||
@@ -284,6 +290,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(ignoreUnviableAttestations)
|
||||
cfg.IgnoreUnviableAttestations = true
|
||||
}
|
||||
if ctx.Bool(lowValcountSweep.Name) {
|
||||
logEnabled(lowValcountSweep)
|
||||
cfg.LowValcountSweep = true
|
||||
}
|
||||
|
||||
if ctx.IsSet(EnableStateDiff.Name) {
|
||||
logEnabled(EnableStateDiff)
|
||||
|
||||
@@ -137,6 +137,11 @@ var (
|
||||
Name: "disable-verbose-sig-verification",
|
||||
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
|
||||
}
|
||||
enableProposerPreprocessing = &cli.BoolFlag{
|
||||
Name: "enable-proposer-preprocessing",
|
||||
Usage: "Enables proposer pre-processing of blocks before proposing.",
|
||||
Value: false,
|
||||
}
|
||||
prepareAllPayloads = &cli.BoolFlag{
|
||||
Name: "prepare-all-payloads",
|
||||
Usage: "Informs the engine to prepare all local payloads. Useful for relayers and builders.",
|
||||
@@ -211,6 +216,13 @@ var (
|
||||
Name: "ignore-unviable-attestations",
|
||||
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
|
||||
}
|
||||
// lowValcountSweep bounds withdrawal sweep by validator count.
|
||||
lowValcountSweep = &cli.BoolFlag{
|
||||
Name: "low-valcount-sweep",
|
||||
Usage: "Uses validator count bound for withdrawal sweep when validator count is less than MaxValidatorsPerWithdrawalsSweep.",
|
||||
Value: false,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
@@ -259,6 +271,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
|
||||
ignoreUnviableAttestations,
|
||||
enableFullSSZDataLogging,
|
||||
disableVerboseSigVerification,
|
||||
enableProposerPreprocessing,
|
||||
prepareAllPayloads,
|
||||
aggregateFirstInterval,
|
||||
aggregateSecondInterval,
|
||||
@@ -272,6 +285,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
|
||||
enableExperimentalAttestationPool,
|
||||
forceHeadFlag,
|
||||
blacklistRoots,
|
||||
lowValcountSweep,
|
||||
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
|
||||
|
||||
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
|
||||
|
||||
@@ -9,6 +9,7 @@ const (
|
||||
RandaoMixesLength = 65536 // EPOCHS_PER_HISTORICAL_VECTOR
|
||||
HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
|
||||
ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
|
||||
BuilderRegistryLimit = 1099511627776 // BUILDER_REGISTRY_LIMIT
|
||||
Eth1DataVotesLength = 2048 // SLOTS_PER_ETH1_VOTING_PERIOD
|
||||
PreviousEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
|
||||
CurrentEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
|
||||
|
||||
@@ -9,6 +9,7 @@ const (
|
||||
RandaoMixesLength = 64 // EPOCHS_PER_HISTORICAL_VECTOR
|
||||
HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
|
||||
ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
|
||||
BuilderRegistryLimit = 1099511627776 // BUILDER_REGISTRY_LIMIT
|
||||
Eth1DataVotesLength = 32 // SLOTS_PER_ETH1_VOTING_PERIOD
|
||||
PreviousEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
|
||||
CurrentEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
|
||||
|
||||
@@ -47,7 +47,6 @@ type BeaconChainConfig struct {
|
||||
HysteresisQuotient uint64 `yaml:"HYSTERESIS_QUOTIENT" spec:"true"` // HysteresisQuotient defines the hysteresis quotient for effective balance calculations.
|
||||
HysteresisDownwardMultiplier uint64 `yaml:"HYSTERESIS_DOWNWARD_MULTIPLIER" spec:"true"` // HysteresisDownwardMultiplier defines the hysteresis downward multiplier for effective balance calculations.
|
||||
HysteresisUpwardMultiplier uint64 `yaml:"HYSTERESIS_UPWARD_MULTIPLIER" spec:"true"` // HysteresisUpwardMultiplier defines the hysteresis upward multiplier for effective balance calculations.
|
||||
BuilderIndexFlag uint64 `yaml:"BUILDER_INDEX_FLAG" spec:"true"` // BuilderIndexFlag marks a ValidatorIndex as a BuilderIndex when the bit is set.
|
||||
|
||||
// Gwei value constants.
|
||||
MinDepositAmount uint64 `yaml:"MIN_DEPOSIT_AMOUNT" spec:"true"` // MinDepositAmount is the minimum amount of Gwei a validator can send to the deposit contract at once (lower amounts will be reverted).
|
||||
@@ -60,7 +59,6 @@ type BeaconChainConfig struct {
|
||||
BLSWithdrawalPrefixByte byte `yaml:"BLS_WITHDRAWAL_PREFIX" spec:"true"` // BLSWithdrawalPrefixByte is used for BLS withdrawal and it's the first byte.
|
||||
ETH1AddressWithdrawalPrefixByte byte `yaml:"ETH1_ADDRESS_WITHDRAWAL_PREFIX" spec:"true"` // ETH1AddressWithdrawalPrefixByte is used for withdrawals and it's the first byte.
|
||||
CompoundingWithdrawalPrefixByte byte `yaml:"COMPOUNDING_WITHDRAWAL_PREFIX" spec:"true"` // CompoundingWithdrawalPrefixByteByte is used for compounding withdrawals and it's the first byte.
|
||||
BuilderWithdrawalPrefixByte byte `yaml:"BUILDER_WITHDRAWAL_PREFIX" spec:"true"` // BuilderWithdrawalPrefixByte is used for builder withdrawal credentials and it's the first byte.
|
||||
ZeroHash [32]byte // ZeroHash is used to represent a zeroed out 32 byte array.
|
||||
|
||||
// Time parameters constants.
|
||||
@@ -126,7 +124,6 @@ type BeaconChainConfig struct {
|
||||
MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
|
||||
MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
|
||||
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
|
||||
MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxBuildersPerWithdrawalsSweep bounds the size of the builder withdrawals sweep per slot.
|
||||
|
||||
// BLS domain values.
|
||||
DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
|
||||
|
||||
@@ -194,8 +194,6 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
|
||||
fmt.Sprintf("SHARD_COMMITTEE_PERIOD: %d", cfg.ShardCommitteePeriod),
|
||||
fmt.Sprintf("MIN_VALIDATOR_WITHDRAWABILITY_DELAY: %d", cfg.MinValidatorWithdrawabilityDelay),
|
||||
fmt.Sprintf("MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxValidatorsPerWithdrawalsSweep),
|
||||
fmt.Sprintf("MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxBuildersPerWithdrawalsSweep),
|
||||
fmt.Sprintf("BUILDER_INDEX_FLAG: %d", cfg.BuilderIndexFlag),
|
||||
fmt.Sprintf("MAX_SEED_LOOKAHEAD: %d", cfg.MaxSeedLookahead),
|
||||
fmt.Sprintf("EJECTION_BALANCE: %d", cfg.EjectionBalance),
|
||||
fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT: %d", cfg.MinPerEpochChurnLimit),
|
||||
|
||||
@@ -55,7 +55,8 @@ var placeholderFields = []string{
|
||||
"MAX_REQUEST_BLOB_SIDECARS_FULU",
|
||||
"MAX_REQUEST_INCLUSION_LIST",
|
||||
"MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
|
||||
"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
|
||||
"MIN_BUILDER_WITHDRAWABILITY_DELAY",
|
||||
"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
|
||||
"PAYLOAD_ATTESTATION_DUE_BPS",
|
||||
"PROPOSER_INCLUSION_LIST_CUTOFF",
|
||||
"PROPOSER_INCLUSION_LIST_CUTOFF_BPS",
|
||||
|
||||
@@ -83,7 +83,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
HysteresisQuotient: 4,
|
||||
HysteresisDownwardMultiplier: 1,
|
||||
HysteresisUpwardMultiplier: 5,
|
||||
BuilderIndexFlag: 1099511627776,
|
||||
|
||||
// Gwei value constants.
|
||||
MinDepositAmount: 1 * 1e9,
|
||||
@@ -95,7 +94,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
BLSWithdrawalPrefixByte: byte(0),
|
||||
ETH1AddressWithdrawalPrefixByte: byte(1),
|
||||
CompoundingWithdrawalPrefixByte: byte(2),
|
||||
BuilderWithdrawalPrefixByte: byte(3),
|
||||
ZeroHash: [32]byte{},
|
||||
|
||||
// Time parameter constants.
|
||||
@@ -169,7 +167,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
MaxWithdrawalsPerPayload: 16,
|
||||
MaxBlsToExecutionChanges: 16,
|
||||
MaxValidatorsPerWithdrawalsSweep: 16384,
|
||||
MaxBuildersPerWithdrawalsSweep: 16384,
|
||||
|
||||
// BLS domain values.
|
||||
DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),
|
||||
@@ -209,7 +206,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
BeaconStateDenebFieldCount: 28,
|
||||
BeaconStateElectraFieldCount: 37,
|
||||
BeaconStateFuluFieldCount: 38,
|
||||
BeaconStateGloasFieldCount: 43,
|
||||
BeaconStateGloasFieldCount: 45,
|
||||
|
||||
// Slasher related values.
|
||||
WeakSubjectivityPeriod: 54000,
|
||||
|
||||
@@ -49,7 +49,6 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.HysteresisQuotient, actual.HysteresisQuotient)
|
||||
require.DeepEqual(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier)
|
||||
require.DeepEqual(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier)
|
||||
require.DeepEqual(t, expected.BuilderIndexFlag, actual.BuilderIndexFlag)
|
||||
require.DeepEqual(t, expected.MinDepositAmount, actual.MinDepositAmount)
|
||||
require.DeepEqual(t, expected.MaxEffectiveBalance, actual.MaxEffectiveBalance)
|
||||
require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
|
||||
@@ -95,7 +94,6 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.MaxDeposits, actual.MaxDeposits)
|
||||
require.DeepEqual(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits)
|
||||
require.DeepEqual(t, expected.MaxWithdrawalsPerPayload, actual.MaxWithdrawalsPerPayload)
|
||||
require.DeepEqual(t, expected.MaxBuildersPerWithdrawalsSweep, actual.MaxBuildersPerWithdrawalsSweep)
|
||||
require.DeepEqual(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer)
|
||||
require.DeepEqual(t, expected.DomainRandao, actual.DomainRandao)
|
||||
require.DeepEqual(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester)
|
||||
|
||||
@@ -669,6 +669,7 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
|
||||
ParentBlockHash: make([]byte, fieldparams.RootLength),
|
||||
ParentBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
|
||||
2
go.mod
@@ -96,6 +96,7 @@ require (
|
||||
google.golang.org/grpc v1.71.0
|
||||
google.golang.org/protobuf v1.36.5
|
||||
gopkg.in/d4l3k/messagediff.v1 v1.2.1
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
honnef.co/go/tools v0.6.0
|
||||
@@ -273,7 +274,6 @@ require (
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
|
||||
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
lukechampine.com/blake3 v1.3.0 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
|
||||
@@ -17,7 +17,9 @@ go_library(
|
||||
"//io/file:go_default_library",
|
||||
"//runtime/logging/logrus-prefixed-formatter:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@in_gopkg_natefinch_lumberjack_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -28,5 +30,8 @@ go_test(
|
||||
"stream_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
deps = [
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -12,16 +12,25 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/io/file"
|
||||
prefixed "github.com/OffchainLabs/prysm/v7/runtime/logging/logrus-prefixed-formatter"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
var ephemeralLogFileVerbosity = logrus.DebugLevel
|
||||
|
||||
// SetLoggingLevel sets the base logging level for logrus.
|
||||
func SetLoggingLevel(lvl logrus.Level) {
|
||||
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
|
||||
}
|
||||
|
||||
func addLogWriter(w io.Writer) {
|
||||
mw := io.MultiWriter(logrus.StandardLogger().Out, w)
|
||||
logrus.SetOutput(mw)
|
||||
}
|
||||
|
||||
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
|
||||
func ConfigurePersistentLogging(logFileName string, format string) error {
|
||||
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
|
||||
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
|
||||
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
|
||||
return err
|
||||
@@ -47,14 +56,48 @@ func ConfigurePersistentLogging(logFileName string, format string) error {
|
||||
formatter.DisableColors = true
|
||||
|
||||
logrus.AddHook(&WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: f,
|
||||
Formatter: formatter,
|
||||
Writer: f,
|
||||
AllowedLevels: logrus.AllLevels[:lvl+1],
|
||||
})
|
||||
|
||||
logrus.Info("File logging initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConfigureEphemeralLogFile adds a log file that keeps 24 hours of logs with >debug verbosity.
|
||||
func ConfigureEphemeralLogFile(datadirPath string, app string) error {
|
||||
logFilePath := filepath.Join(datadirPath, "logs", app+".log")
|
||||
if err := file.MkdirAll(filepath.Dir(logFilePath)); err != nil {
|
||||
return errors.Wrap(err, "failed to create directory")
|
||||
}
|
||||
|
||||
// Create formatter and writer hook
|
||||
formatter := new(prefixed.TextFormatter)
|
||||
formatter.TimestampFormat = "2006-01-02 15:04:05.00"
|
||||
formatter.FullTimestamp = true
|
||||
// If persistent log files are written - we disable the log messages coloring because
|
||||
// the colors are ANSI codes and seen as gibberish in the log files.
|
||||
formatter.DisableColors = true
|
||||
|
||||
// configure the lumberjack log writer to rotate logs every ~24 hours
|
||||
debugWriter := &lumberjack.Logger{
|
||||
Filename: logFilePath,
|
||||
MaxSize: 250, // MB, to avoid unbounded growth
|
||||
MaxBackups: 1, // one backup in case of size-based rotations
|
||||
MaxAge: 1, // days; files older than this are removed
|
||||
}
|
||||
|
||||
logrus.AddHook(&WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: debugWriter,
|
||||
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
|
||||
})
|
||||
|
||||
logrus.Info("Ephemeral log file initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
// MaskCredentialsLogging masks the url credentials before logging for security purpose
|
||||
// [scheme:][//[userinfo@]host][/]path[?query][#fragment] --> [scheme:][//[***]host][/***][#***]
|
||||
// if the format is not matched nothing is done, string is returned as is.
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var urltests = []struct {
|
||||
@@ -34,13 +35,13 @@ func TestConfigurePersistantLogging(t *testing.T) {
|
||||
logFileName := "test.log"
|
||||
existingDirectory := "test-1-existing-testing-dir"
|
||||
|
||||
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text")
|
||||
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
require.NoError(t, err)
|
||||
|
||||
// 2. Test creation of file along with parent directory
|
||||
nonExistingDirectory := "test-2-non-existing-testing-dir"
|
||||
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text")
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
require.NoError(t, err)
|
||||
|
||||
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
|
||||
@@ -51,7 +52,7 @@ func TestConfigurePersistantLogging(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text")
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
require.NoError(t, err)
|
||||
|
||||
//4. Create log file in a directory without 700 permissions
|
||||
@@ -61,3 +62,38 @@ func TestConfigurePersistantLogging(t *testing.T) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigureEphemeralLogFile(t *testing.T) {
|
||||
testParentDir := t.TempDir()
|
||||
|
||||
// 1. Test creation of file in an existing parent directory
|
||||
existingDirectory := "test-1-existing-testing-dir"
|
||||
|
||||
err := ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), "beacon-chain")
|
||||
require.NoError(t, err)
|
||||
|
||||
// 2. Test creation of file along with parent directory
|
||||
nonExistingDirectory := "test-2-non-existing-testing-dir"
|
||||
|
||||
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s", testParentDir, nonExistingDirectory), "beacon-chain")
|
||||
require.NoError(t, err)
|
||||
|
||||
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
|
||||
existingDirectory = "test-3-existing-testing-dir"
|
||||
nonExistingSubDirectory := "test-3-non-existing-sub-dir"
|
||||
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0700)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory), "beacon-chain")
|
||||
require.NoError(t, err)
|
||||
|
||||
//4. Create log file in a directory without 700 permissions
|
||||
existingDirectory = "test-4-existing-testing-dir"
|
||||
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0750)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory), "beacon-chain")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -45,7 +45,6 @@ func Setup(ctx context.Context, serviceName, processName, endpoint string, sampl
|
||||
exporter,
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
trace.WithBatchTimeout(trace.DefaultScheduleDelay*time.Millisecond),
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
),
|
||||
trace.WithResource(
|
||||
resource.NewWithAttributes(
|
||||
|
||||
@@ -35,6 +35,21 @@ func CopyValidator(val *Validator) *Validator {
|
||||
}
|
||||
}
|
||||
|
||||
// CopyBuilder copies the provided builder.
|
||||
func CopyBuilder(builder *Builder) *Builder {
|
||||
if builder == nil {
|
||||
return nil
|
||||
}
|
||||
return &Builder{
|
||||
Pubkey: bytesutil.SafeCopyBytes(builder.Pubkey),
|
||||
Version: bytesutil.SafeCopyBytes(builder.Version),
|
||||
ExecutionAddress: bytesutil.SafeCopyBytes(builder.ExecutionAddress),
|
||||
Balance: builder.Balance,
|
||||
DepositEpoch: builder.DepositEpoch,
|
||||
WithdrawableEpoch: builder.WithdrawableEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// CopySyncCommitteeMessage copies the provided sync committee message object.
|
||||
func CopySyncCommitteeMessage(s *SyncCommitteeMessage) *SyncCommitteeMessage {
|
||||
if s == nil {
|
||||
|
||||
@@ -1220,7 +1220,7 @@ func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
|
||||
BlockHash: bytes(32),
|
||||
FeeRecipient: bytes(20),
|
||||
GasLimit: rand.Uint64(),
|
||||
BuilderIndex: primitives.ValidatorIndex(rand.Uint64()),
|
||||
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
|
||||
Slot: primitives.Slot(rand.Uint64()),
|
||||
Value: primitives.Gwei(rand.Uint64()),
|
||||
BlobKzgCommitmentsRoot: bytes(32),
|
||||
|
||||
@@ -13,11 +13,13 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
|
||||
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
|
||||
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
|
||||
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
|
||||
GasLimit: header.GasLimit,
|
||||
BuilderIndex: header.BuilderIndex,
|
||||
Slot: header.Slot,
|
||||
Value: header.Value,
|
||||
ExecutionPayment: header.ExecutionPayment,
|
||||
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
|
||||
}
|
||||
}
|
||||
@@ -28,10 +30,9 @@ func (withdrawal *BuilderPendingWithdrawal) Copy() *BuilderPendingWithdrawal {
|
||||
return nil
|
||||
}
|
||||
return &BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(withdrawal.FeeRecipient),
|
||||
Amount: withdrawal.Amount,
|
||||
BuilderIndex: withdrawal.BuilderIndex,
|
||||
WithdrawableEpoch: withdrawal.WithdrawableEpoch,
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(withdrawal.FeeRecipient),
|
||||
Amount: withdrawal.Amount,
|
||||
BuilderIndex: withdrawal.BuilderIndex,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
1367
proto/prysm/v1alpha1/gloas.pb.go
generated
File diff suppressed because it is too large
@@ -26,30 +26,37 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
|
||||
// parent_block_hash: Hash32
|
||||
// parent_block_root: Root
|
||||
// block_hash: Hash32
|
||||
// prev_randao: Bytes32
|
||||
// fee_recipient: ExecutionAddress
|
||||
// gas_limit: uint64
|
||||
// builder_index: ValidatorIndex
|
||||
// builder_index: BuilderIndex
|
||||
// slot: Slot
|
||||
// value: Gwei
|
||||
// execution_payment: Gwei
|
||||
// blob_kzg_commitments_root: Root
|
||||
message ExecutionPayloadBid {
|
||||
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes fee_recipient = 4 [ (ethereum.eth.ext.ssz_size) = "20" ];
|
||||
uint64 gas_limit = 5;
|
||||
uint64 builder_index = 6 [ (ethereum.eth.ext.cast_type) =
|
||||
bytes prev_randao = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes fee_recipient = 5 [ (ethereum.eth.ext.ssz_size) = "20" ];
|
||||
uint64 gas_limit = 6;
|
||||
uint64 builder_index = 7 [ (ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/"
|
||||
"consensus-types/primitives.ValidatorIndex" ];
|
||||
uint64 slot = 7 [
|
||||
"consensus-types/primitives.BuilderIndex" ];
|
||||
uint64 slot = 8 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
|
||||
];
|
||||
uint64 value = 8 [
|
||||
uint64 value = 9 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
|
||||
];
|
||||
bytes blob_kzg_commitments_root = 9 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
uint64 execution_payment = 10 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
|
||||
];
|
||||
bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
}
|
||||
|
||||
// SignedExecutionPayloadBid wraps an execution payload bid with a signature.
|
||||
@@ -185,12 +192,15 @@ message SignedBeaconBlockGloas {
|
||||
// [All previous fields from earlier forks]
|
||||
// # Replaced existing latest execution header position
|
||||
// latest_execution_payload_bid: ExecutionPayloadBid
|
||||
// # New fields in Gloas:EIP7732
|
||||
// builders: List[Builder, BUILDER_REGISTRY_LIMIT]
|
||||
// # [New in Gloas:EIP7732]
|
||||
// next_withdrawal_builder_index: BuilderIndex
|
||||
// # [New in Gloas:EIP7732]
|
||||
// execution_payload_availability: Bitvector[SLOTS_PER_HISTORICAL_ROOT]
|
||||
// builder_pending_payments: Vector[BuilderPendingPayment, 2 * SLOTS_PER_EPOCH]
|
||||
// builder_pending_withdrawals: List[BuilderPendingWithdrawal, BUILDER_PENDING_WITHDRAWALS_LIMIT]
|
||||
// latest_block_hash: Hash32
|
||||
// latest_withdrawals_root: Root
|
||||
// payload_expected_withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||
message BeaconStateGloas {
|
||||
// Versioning [1001-2000]
|
||||
uint64 genesis_time = 1001;
|
||||
@@ -300,13 +310,20 @@ message BeaconStateGloas {
|
||||
[ (ethereum.eth.ext.ssz_size) = "proposer_lookahead_size" ];
|
||||
|
||||
// Fields introduced in Gloas fork [14001-15000]
|
||||
bytes execution_payload_availability = 14001 [
|
||||
repeated Builder builders = 14001
|
||||
[ (ethereum.eth.ext.ssz_max) = "builder_registry_limit" ];
|
||||
uint64 next_withdrawal_builder_index = 14002
|
||||
[ (ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/"
|
||||
"primitives.BuilderIndex" ];
|
||||
bytes execution_payload_availability = 14003 [
|
||||
(ethereum.eth.ext.ssz_size) = "execution_payload_availability.size"
|
||||
];
|
||||
repeated BuilderPendingPayment builder_pending_payments = 14002 [(ethereum.eth.ext.ssz_size) = "builder_pending_payments.size"];
|
||||
repeated BuilderPendingWithdrawal builder_pending_withdrawals = 14003 [(ethereum.eth.ext.ssz_max) = "1048576"];
|
||||
bytes latest_block_hash = 14004 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes latest_withdrawals_root = 14005 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
repeated BuilderPendingPayment builder_pending_payments = 14004 [(ethereum.eth.ext.ssz_size) = "builder_pending_payments.size"];
|
||||
repeated BuilderPendingWithdrawal builder_pending_withdrawals = 14005 [(ethereum.eth.ext.ssz_max) = "1048576"];
|
||||
bytes latest_block_hash = 14006 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
repeated ethereum.engine.v1.Withdrawal payload_expected_withdrawals = 14007
|
||||
[ (ethereum.eth.ext.ssz_max) = "withdrawal.size" ];
|
||||
}
|
||||
|
||||
// BuilderPendingPayment represents a pending payment to a builder.
|
||||
@@ -329,8 +346,7 @@ message BuilderPendingPayment {
|
||||
// class BuilderPendingWithdrawal(Container):
|
||||
// fee_recipient: ExecutionAddress
|
||||
// amount: Gwei
|
||||
// builder_index: ValidatorIndex
|
||||
// withdrawable_epoch: Epoch
|
||||
// builder_index: BuilderIndex
|
||||
message BuilderPendingWithdrawal {
|
||||
bytes fee_recipient = 1 [ (ethereum.eth.ext.ssz_size) = "20" ];
|
||||
uint64 amount = 2 [
|
||||
@@ -340,11 +356,7 @@ message BuilderPendingWithdrawal {
|
||||
uint64 builder_index = 3
|
||||
[ (ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/"
|
||||
"primitives.ValidatorIndex" ];
|
||||
uint64 withdrawable_epoch = 4 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
|
||||
];
|
||||
"primitives.BuilderIndex" ];
|
||||
}
|
||||
|
||||
// DataColumnSidecarGloas represents a data column sidecar in the Gloas fork.
|
||||
@@ -387,7 +399,7 @@ message DataColumnSidecarGloas {
|
||||
// class ExecutionPayloadEnvelope(Container):
|
||||
// payload: ExecutionPayload
|
||||
// execution_requests: ExecutionRequests
|
||||
// builder_index: ValidatorIndex
|
||||
// builder_index: BuilderIndex
|
||||
// beacon_block_root: Root
|
||||
// slot: Slot
|
||||
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
@@ -397,7 +409,7 @@ message ExecutionPayloadEnvelope {
|
||||
ethereum.engine.v1.ExecutionRequests execution_requests = 2;
|
||||
uint64 builder_index = 3 [ (ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/"
|
||||
"consensus-types/primitives.ValidatorIndex" ];
|
||||
"consensus-types/primitives.BuilderIndex" ];
|
||||
bytes beacon_block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
uint64 slot = 5 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
@@ -421,3 +433,31 @@ message SignedExecutionPayloadEnvelope {
|
||||
ExecutionPayloadEnvelope message = 1;
|
||||
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
|
||||
}
|
||||
|
||||
// Builder represents a builder in the Gloas fork.
|
||||
//
|
||||
// Spec:
|
||||
// class Builder(Container):
|
||||
// pubkey: BLSPubkey
|
||||
// version: uint8
|
||||
// execution_address: ExecutionAddress
|
||||
// balance: Gwei
|
||||
// deposit_epoch: Epoch
|
||||
// withdrawable_epoch: Epoch
|
||||
message Builder {
|
||||
bytes pubkey = 1 [ (ethereum.eth.ext.ssz_size) = "48" ];
|
||||
bytes version = 2 [ (ethereum.eth.ext.ssz_size) = "1" ];
|
||||
bytes execution_address = 3 [ (ethereum.eth.ext.ssz_size) = "20" ];
|
||||
uint64 balance = 4 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
|
||||
];
|
||||
uint64 deposit_epoch = 5 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
|
||||
];
|
||||
uint64 withdrawable_epoch = 6 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
|
||||
];
|
||||
}
|
||||
|
||||
@@ -37,26 +37,36 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
}
|
||||
dst = append(dst, e.BlockHash...)
|
||||
|
||||
// Field (3) 'FeeRecipient'
|
||||
// Field (3) 'PrevRandao'
|
||||
if size := len(e.PrevRandao); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.PrevRandao", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.PrevRandao...)
|
||||
|
||||
// Field (4) 'FeeRecipient'
|
||||
if size := len(e.FeeRecipient); size != 20 {
|
||||
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.FeeRecipient...)
|
||||
|
||||
// Field (4) 'GasLimit'
|
||||
// Field (5) 'GasLimit'
|
||||
dst = ssz.MarshalUint64(dst, e.GasLimit)
|
||||
|
||||
// Field (5) 'BuilderIndex'
|
||||
// Field (6) 'BuilderIndex'
|
||||
dst = ssz.MarshalUint64(dst, uint64(e.BuilderIndex))
|
||||
|
||||
// Field (6) 'Slot'
|
||||
// Field (7) 'Slot'
|
||||
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
|
||||
|
||||
// Field (7) 'Value'
|
||||
// Field (8) 'Value'
|
||||
dst = ssz.MarshalUint64(dst, uint64(e.Value))
|
||||
|
||||
// Field (8) 'BlobKzgCommitmentsRoot'
|
||||
// Field (9) 'ExecutionPayment'
|
||||
dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))
|
||||
|
||||
// Field (10) 'BlobKzgCommitmentsRoot'
|
||||
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
|
||||
return
|
||||
@@ -70,7 +80,7 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size != 180 {
|
||||
if size != 220 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
@@ -92,36 +102,45 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
|
||||
}
|
||||
e.BlockHash = append(e.BlockHash, buf[64:96]...)
|
||||
|
||||
// Field (3) 'FeeRecipient'
|
||||
// Field (3) 'PrevRandao'
|
||||
if cap(e.PrevRandao) == 0 {
|
||||
e.PrevRandao = make([]byte, 0, len(buf[96:128]))
|
||||
}
|
||||
e.PrevRandao = append(e.PrevRandao, buf[96:128]...)
|
||||
|
||||
// Field (4) 'FeeRecipient'
|
||||
if cap(e.FeeRecipient) == 0 {
|
||||
e.FeeRecipient = make([]byte, 0, len(buf[96:116]))
|
||||
e.FeeRecipient = make([]byte, 0, len(buf[128:148]))
|
||||
}
|
||||
e.FeeRecipient = append(e.FeeRecipient, buf[96:116]...)
|
||||
e.FeeRecipient = append(e.FeeRecipient, buf[128:148]...)
|
||||
|
||||
// Field (4) 'GasLimit'
|
||||
e.GasLimit = ssz.UnmarshallUint64(buf[116:124])
|
||||
// Field (5) 'GasLimit'
|
||||
e.GasLimit = ssz.UnmarshallUint64(buf[148:156])
|
||||
|
||||
// Field (5) 'BuilderIndex'
|
||||
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[124:132]))
|
||||
// Field (6) 'BuilderIndex'
|
||||
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[156:164]))
|
||||
|
||||
// Field (6) 'Slot'
|
||||
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[132:140]))
|
||||
// Field (7) 'Slot'
|
||||
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[164:172]))
|
||||
|
||||
// Field (7) 'Value'
|
||||
e.Value = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[140:148]))
|
||||
// Field (8) 'Value'
|
||||
e.Value = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[172:180]))
|
||||
|
||||
// Field (8) 'BlobKzgCommitmentsRoot'
|
||||
// Field (9) 'ExecutionPayment'
|
||||
e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))
|
||||
|
||||
// Field (10) 'BlobKzgCommitmentsRoot'
|
||||
if cap(e.BlobKzgCommitmentsRoot) == 0 {
|
||||
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[148:180]))
|
||||
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
|
||||
}
|
||||
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[148:180]...)
|
||||
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
|
||||
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
|
||||
size = 180
|
||||
size = 220
|
||||
return
|
||||
}
|
||||
|
||||
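A quick check of the new fixed SSZ size used in the generated code above, based on the field layout in this diff: the old 180-byte encoding gains a 32-byte prev_randao and an 8-byte execution_payment, giving 220 bytes.

package main

import "fmt"

func main() {
	// parent_block_hash + parent_block_root + block_hash + prev_randao +
	// fee_recipient + gas_limit + builder_index + slot + value +
	// execution_payment + blob_kzg_commitments_root
	size := 32 + 32 + 32 + 32 + 20 + 8 + 8 + 8 + 8 + 8 + 32
	fmt.Println(size) // 220
}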
@@ -155,26 +174,36 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.BlockHash)

// Field (3) 'FeeRecipient'
// Field (3) 'PrevRandao'
if size := len(e.PrevRandao); size != 32 {
err = ssz.ErrBytesLengthFn("--.PrevRandao", size, 32)
return
}
hh.PutBytes(e.PrevRandao)

// Field (4) 'FeeRecipient'
if size := len(e.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
hh.PutBytes(e.FeeRecipient)

// Field (4) 'GasLimit'
// Field (5) 'GasLimit'
hh.PutUint64(e.GasLimit)

// Field (5) 'BuilderIndex'
// Field (6) 'BuilderIndex'
hh.PutUint64(uint64(e.BuilderIndex))

// Field (6) 'Slot'
// Field (7) 'Slot'
hh.PutUint64(uint64(e.Slot))

// Field (7) 'Value'
// Field (8) 'Value'
hh.PutUint64(uint64(e.Value))

// Field (8) 'BlobKzgCommitmentsRoot'
// Field (9) 'ExecutionPayment'
hh.PutUint64(uint64(e.ExecutionPayment))

// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
@@ -216,7 +245,7 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 276 {
if size != 316 {
return ssz.ErrSize
}

@@ -224,22 +253,22 @@ func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
if err = s.Message.UnmarshalSSZ(buf[0:180]); err != nil {
if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
return err
}

// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[180:276]))
s.Signature = make([]byte, 0, len(buf[220:316]))
}
s.Signature = append(s.Signature, buf[180:276]...)
s.Signature = append(s.Signature, buf[220:316]...)

return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 276
size = 316
return
}

@@ -713,7 +742,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(664)
offset := int(704)

// Field (0) 'RandaoReveal'
if size := len(b.RandaoReveal); size != 96 {
@@ -885,7 +914,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 664 {
if size < 704 {
return ssz.ErrSize
}

@@ -917,7 +946,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o3 != 664 {
if o3 != 704 {
return ssz.ErrInvalidVariableOffset
}

@@ -958,12 +987,12 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:660]); err != nil {
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
return err
}

// Offset (11) 'PayloadAttestations'
if o11 = ssz.ReadOffset(buf[660:664]); o11 > size || o9 > o11 {
if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
return ssz.ErrOffset
}

@@ -1105,7 +1134,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
size = 664
size = 704

// Field (3) 'ProposerSlashings'
size += len(b.ProposerSlashings) * 416
@@ -1408,7 +1437,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(2741821)
offset := int(2741333)

// Field (0) 'GenesisTime'
dst = ssz.MarshalUint64(dst, b.GenesisTime)
@@ -1630,14 +1659,21 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = ssz.MarshalUint64(dst, b.ProposerLookahead[ii])
}

// Field (38) 'ExecutionPayloadAvailability'
// Offset (38) 'Builders'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.Builders) * 93

// Field (39) 'NextWithdrawalBuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(b.NextWithdrawalBuilderIndex))

// Field (40) 'ExecutionPayloadAvailability'
if size := len(b.ExecutionPayloadAvailability); size != 1024 {
err = ssz.ErrBytesLengthFn("--.ExecutionPayloadAvailability", size, 1024)
return
}
dst = append(dst, b.ExecutionPayloadAvailability...)

// Field (39) 'BuilderPendingPayments'
// Field (41) 'BuilderPendingPayments'
if size := len(b.BuilderPendingPayments); size != 64 {
err = ssz.ErrVectorLengthFn("--.BuilderPendingPayments", size, 64)
return
@@ -1648,23 +1684,20 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}

// Offset (40) 'BuilderPendingWithdrawals'
// Offset (42) 'BuilderPendingWithdrawals'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.BuilderPendingWithdrawals) * 44
offset += len(b.BuilderPendingWithdrawals) * 36

// Field (41) 'LatestBlockHash'
// Field (43) 'LatestBlockHash'
if size := len(b.LatestBlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestBlockHash", size, 32)
return
}
dst = append(dst, b.LatestBlockHash...)

// Field (42) 'LatestWithdrawalsRoot'
if size := len(b.LatestWithdrawalsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestWithdrawalsRoot", size, 32)
return
}
dst = append(dst, b.LatestWithdrawalsRoot...)
// Offset (44) 'PayloadExpectedWithdrawals'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.PayloadExpectedWithdrawals) * 44

// Field (7) 'HistoricalRoots'
if size := len(b.HistoricalRoots); size > 16777216 {
@@ -1777,7 +1810,18 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}

// Field (40) 'BuilderPendingWithdrawals'
// Field (38) 'Builders'
if size := len(b.Builders); size > 1099511627776 {
err = ssz.ErrListTooBigFn("--.Builders", size, 1099511627776)
return
}
for ii := 0; ii < len(b.Builders); ii++ {
if dst, err = b.Builders[ii].MarshalSSZTo(dst); err != nil {
return
}
}

// Field (42) 'BuilderPendingWithdrawals'
if size := len(b.BuilderPendingWithdrawals); size > 1048576 {
err = ssz.ErrListTooBigFn("--.BuilderPendingWithdrawals", size, 1048576)
return
@@ -1788,6 +1832,17 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}

// Field (44) 'PayloadExpectedWithdrawals'
if size := len(b.PayloadExpectedWithdrawals); size > 16 {
err = ssz.ErrListTooBigFn("--.PayloadExpectedWithdrawals", size, 16)
return
}
for ii := 0; ii < len(b.PayloadExpectedWithdrawals); ii++ {
if dst, err = b.PayloadExpectedWithdrawals[ii].MarshalSSZTo(dst); err != nil {
return
}
}

return
}

@@ -1795,12 +1850,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 2741821 {
if size < 2741333 {
return ssz.ErrSize
}

tail := buf
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o40 uint64
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64

// Field (0) 'GenesisTime'
b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])
@@ -1853,7 +1908,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o7 != 2741821 {
if o7 != 2741333 {
return ssz.ErrInvalidVariableOffset
}

@@ -1963,93 +2018,100 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736809]); err != nil {
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
return err
}

// Field (25) 'NextWithdrawalIndex'
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736809:2736817])
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])

// Field (26) 'NextWithdrawalValidatorIndex'
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736817:2736825]))
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))

// Offset (27) 'HistoricalSummaries'
if o27 = ssz.ReadOffset(buf[2736825:2736829]); o27 > size || o21 > o27 {
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
return ssz.ErrOffset
}

// Field (28) 'DepositRequestsStartIndex'
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736829:2736837])
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])

// Field (29) 'DepositBalanceToConsume'
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736837:2736845]))
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))

// Field (30) 'ExitBalanceToConsume'
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736845:2736853]))
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))

// Field (31) 'EarliestExitEpoch'
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736853:2736861]))
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))

// Field (32) 'ConsolidationBalanceToConsume'
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736861:2736869]))
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))

// Field (33) 'EarliestConsolidationEpoch'
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736869:2736877]))
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))

// Offset (34) 'PendingDeposits'
if o34 = ssz.ReadOffset(buf[2736877:2736881]); o34 > size || o27 > o34 {
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
return ssz.ErrOffset
}

// Offset (35) 'PendingPartialWithdrawals'
if o35 = ssz.ReadOffset(buf[2736881:2736885]); o35 > size || o34 > o35 {
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
return ssz.ErrOffset
}

// Offset (36) 'PendingConsolidations'
if o36 = ssz.ReadOffset(buf[2736885:2736889]); o36 > size || o35 > o36 {
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
return ssz.ErrOffset
}

// Field (37) 'ProposerLookahead'
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
for ii := 0; ii < 64; ii++ {
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736889:2737401][ii*8 : (ii+1)*8])
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
}

// Field (38) 'ExecutionPayloadAvailability'
// Offset (38) 'Builders'
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
return ssz.ErrOffset
}

// Field (39) 'NextWithdrawalBuilderIndex'
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))

// Field (40) 'ExecutionPayloadAvailability'
if cap(b.ExecutionPayloadAvailability) == 0 {
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737401:2738425]))
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
}
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737401:2738425]...)
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)

// Field (39) 'BuilderPendingPayments'
// Field (41) 'BuilderPendingPayments'
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)
for ii := 0; ii < 64; ii++ {
if b.BuilderPendingPayments[ii] == nil {
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
}
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738425:2741753][ii*52 : (ii+1)*52]); err != nil {
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
return err
}
}

// Offset (40) 'BuilderPendingWithdrawals'
if o40 = ssz.ReadOffset(buf[2741753:2741757]); o40 > size || o36 > o40 {
// Offset (42) 'BuilderPendingWithdrawals'
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
return ssz.ErrOffset
}

// Field (41) 'LatestBlockHash'
// Field (43) 'LatestBlockHash'
if cap(b.LatestBlockHash) == 0 {
b.LatestBlockHash = make([]byte, 0, len(buf[2741757:2741789]))
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
}
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741757:2741789]...)
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)

// Field (42) 'LatestWithdrawalsRoot'
if cap(b.LatestWithdrawalsRoot) == 0 {
b.LatestWithdrawalsRoot = make([]byte, 0, len(buf[2741789:2741821]))
// Offset (44) 'PayloadExpectedWithdrawals'
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
return ssz.ErrOffset
}
b.LatestWithdrawalsRoot = append(b.LatestWithdrawalsRoot, buf[2741789:2741821]...)

// Field (7) 'HistoricalRoots'
{
@@ -2209,7 +2271,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {

// Field (36) 'PendingConsolidations'
{
buf = tail[o36:o40]
buf = tail[o36:o38]
num, err := ssz.DivideInt2(len(buf), 16, 262144)
if err != nil {
return err
@@ -2225,10 +2287,28 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
}
}

// Field (40) 'BuilderPendingWithdrawals'
// Field (38) 'Builders'
{
buf = tail[o40:]
num, err := ssz.DivideInt2(len(buf), 44, 1048576)
buf = tail[o38:o42]
num, err := ssz.DivideInt2(len(buf), 93, 1099511627776)
if err != nil {
return err
}
b.Builders = make([]*Builder, num)
for ii := 0; ii < num; ii++ {
if b.Builders[ii] == nil {
b.Builders[ii] = new(Builder)
}
if err = b.Builders[ii].UnmarshalSSZ(buf[ii*93 : (ii+1)*93]); err != nil {
return err
}
}
}

// Field (42) 'BuilderPendingWithdrawals'
{
buf = tail[o42:o44]
num, err := ssz.DivideInt2(len(buf), 36, 1048576)
if err != nil {
return err
}
@@ -2237,7 +2317,25 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.BuilderPendingWithdrawals[ii] == nil {
b.BuilderPendingWithdrawals[ii] = new(BuilderPendingWithdrawal)
}
if err = b.BuilderPendingWithdrawals[ii].UnmarshalSSZ(buf[ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingWithdrawals[ii].UnmarshalSSZ(buf[ii*36 : (ii+1)*36]); err != nil {
return err
}
}
}

// Field (44) 'PayloadExpectedWithdrawals'
{
buf = tail[o44:]
num, err := ssz.DivideInt2(len(buf), 44, 16)
if err != nil {
return err
}
b.PayloadExpectedWithdrawals = make([]*v1.Withdrawal, num)
for ii := 0; ii < num; ii++ {
if b.PayloadExpectedWithdrawals[ii] == nil {
b.PayloadExpectedWithdrawals[ii] = new(v1.Withdrawal)
}
if err = b.PayloadExpectedWithdrawals[ii].UnmarshalSSZ(buf[ii*44 : (ii+1)*44]); err != nil {
return err
}
}
@@ -2247,7 +2345,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
func (b *BeaconStateGloas) SizeSSZ() (size int) {
size = 2741821
size = 2741333

// Field (7) 'HistoricalRoots'
size += len(b.HistoricalRoots) * 32
@@ -2282,8 +2380,14 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
// Field (36) 'PendingConsolidations'
size += len(b.PendingConsolidations) * 16

// Field (40) 'BuilderPendingWithdrawals'
size += len(b.BuilderPendingWithdrawals) * 44
// Field (38) 'Builders'
size += len(b.Builders) * 93

// Field (42) 'BuilderPendingWithdrawals'
size += len(b.BuilderPendingWithdrawals) * 36

// Field (44) 'PayloadExpectedWithdrawals'
size += len(b.PayloadExpectedWithdrawals) * 44

return
}
@@ -2637,14 +2741,33 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(subIndx)
}

// Field (38) 'ExecutionPayloadAvailability'
// Field (38) 'Builders'
{
subIndx := hh.Index()
num := uint64(len(b.Builders))
if num > 1099511627776 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range b.Builders {
if err = elem.HashTreeRootWith(hh); err != nil {
return
}
}
hh.MerkleizeWithMixin(subIndx, num, 1099511627776)
}

// Field (39) 'NextWithdrawalBuilderIndex'
hh.PutUint64(uint64(b.NextWithdrawalBuilderIndex))

// Field (40) 'ExecutionPayloadAvailability'
if size := len(b.ExecutionPayloadAvailability); size != 1024 {
err = ssz.ErrBytesLengthFn("--.ExecutionPayloadAvailability", size, 1024)
return
}
hh.PutBytes(b.ExecutionPayloadAvailability)

// Field (39) 'BuilderPendingPayments'
// Field (41) 'BuilderPendingPayments'
{
subIndx := hh.Index()
for _, elem := range b.BuilderPendingPayments {
@@ -2655,7 +2778,7 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(subIndx)
}

// Field (40) 'BuilderPendingWithdrawals'
// Field (42) 'BuilderPendingWithdrawals'
{
subIndx := hh.Index()
num := uint64(len(b.BuilderPendingWithdrawals))
@@ -2671,19 +2794,28 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}

// Field (41) 'LatestBlockHash'
// Field (43) 'LatestBlockHash'
if size := len(b.LatestBlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestBlockHash", size, 32)
return
}
hh.PutBytes(b.LatestBlockHash)

// Field (42) 'LatestWithdrawalsRoot'
if size := len(b.LatestWithdrawalsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestWithdrawalsRoot", size, 32)
return
// Field (44) 'PayloadExpectedWithdrawals'
{
subIndx := hh.Index()
num := uint64(len(b.PayloadExpectedWithdrawals))
if num > 16 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range b.PayloadExpectedWithdrawals {
if err = elem.HashTreeRootWith(hh); err != nil {
return
}
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.PutBytes(b.LatestWithdrawalsRoot)

hh.Merkleize(indx)
return
@@ -2716,7 +2848,7 @@ func (b *BuilderPendingPayment) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 52 {
if size != 44 {
return ssz.ErrSize
}

@@ -2727,7 +2859,7 @@ func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {
if b.Withdrawal == nil {
b.Withdrawal = new(BuilderPendingWithdrawal)
}
if err = b.Withdrawal.UnmarshalSSZ(buf[8:52]); err != nil {
if err = b.Withdrawal.UnmarshalSSZ(buf[8:44]); err != nil {
return err
}

@@ -2736,7 +2868,7 @@ func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BuilderPendingPayment object
func (b *BuilderPendingPayment) SizeSSZ() (size int) {
size = 52
size = 44
return
}

@@ -2783,9 +2915,6 @@ func (b *BuilderPendingWithdrawal) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (2) 'BuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(b.BuilderIndex))

// Field (3) 'WithdrawableEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.WithdrawableEpoch))

return
}

@@ -2793,7 +2922,7 @@ func (b *BuilderPendingWithdrawal) MarshalSSZTo(buf []byte) (dst []byte, err err
func (b *BuilderPendingWithdrawal) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 44 {
if size != 36 {
return ssz.ErrSize
}

@@ -2807,17 +2936,14 @@ func (b *BuilderPendingWithdrawal) UnmarshalSSZ(buf []byte) error {
b.Amount = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[20:28]))

// Field (2) 'BuilderIndex'
b.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[28:36]))

// Field (3) 'WithdrawableEpoch'
b.WithdrawableEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[36:44]))
b.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[28:36]))

return err
}

// SizeSSZ returns the ssz encoded size in bytes for the BuilderPendingWithdrawal object
func (b *BuilderPendingWithdrawal) SizeSSZ() (size int) {
size = 44
size = 36
return
}

@@ -2843,9 +2969,6 @@ func (b *BuilderPendingWithdrawal) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (2) 'BuilderIndex'
hh.PutUint64(uint64(b.BuilderIndex))

// Field (3) 'WithdrawableEpoch'
hh.PutUint64(uint64(b.WithdrawableEpoch))

hh.Merkleize(indx)
return
}
@@ -3218,7 +3341,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
}

// Field (2) 'BuilderIndex'
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[8:16]))
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[8:16]))

// Field (3) 'BeaconBlockRoot'
if cap(e.BeaconBlockRoot) == 0 {
@@ -3472,3 +3595,132 @@ func (s *SignedExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err e
hh.Merkleize(indx)
return
}

// MarshalSSZ ssz marshals the Builder object
func (b *Builder) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}

// MarshalSSZTo ssz marshals the Builder object to a target array
func (b *Builder) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf

// Field (0) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, b.Pubkey...)

// Field (1) 'Version'
if size := len(b.Version); size != 1 {
err = ssz.ErrBytesLengthFn("--.Version", size, 1)
return
}
dst = append(dst, b.Version...)

// Field (2) 'ExecutionAddress'
if size := len(b.ExecutionAddress); size != 20 {
err = ssz.ErrBytesLengthFn("--.ExecutionAddress", size, 20)
return
}
dst = append(dst, b.ExecutionAddress...)

// Field (3) 'Balance'
dst = ssz.MarshalUint64(dst, uint64(b.Balance))

// Field (4) 'DepositEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.DepositEpoch))

// Field (5) 'WithdrawableEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.WithdrawableEpoch))

return
}

// UnmarshalSSZ ssz unmarshals the Builder object
func (b *Builder) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 93 {
return ssz.ErrSize
}

// Field (0) 'Pubkey'
if cap(b.Pubkey) == 0 {
b.Pubkey = make([]byte, 0, len(buf[0:48]))
}
b.Pubkey = append(b.Pubkey, buf[0:48]...)

// Field (1) 'Version'
if cap(b.Version) == 0 {
b.Version = make([]byte, 0, len(buf[48:49]))
}
b.Version = append(b.Version, buf[48:49]...)

// Field (2) 'ExecutionAddress'
if cap(b.ExecutionAddress) == 0 {
b.ExecutionAddress = make([]byte, 0, len(buf[49:69]))
}
b.ExecutionAddress = append(b.ExecutionAddress, buf[49:69]...)

// Field (3) 'Balance'
b.Balance = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[69:77]))

// Field (4) 'DepositEpoch'
b.DepositEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[77:85]))

// Field (5) 'WithdrawableEpoch'
b.WithdrawableEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[85:93]))

return err
}

// SizeSSZ returns the ssz encoded size in bytes for the Builder object
func (b *Builder) SizeSSZ() (size int) {
size = 93
return
}

// HashTreeRoot ssz hashes the Builder object
func (b *Builder) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}

// HashTreeRootWith ssz hashes the Builder object with a hasher
func (b *Builder) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()

// Field (0) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(b.Pubkey)

// Field (1) 'Version'
if size := len(b.Version); size != 1 {
err = ssz.ErrBytesLengthFn("--.Version", size, 1)
return
}
hh.PutBytes(b.Version)

// Field (2) 'ExecutionAddress'
if size := len(b.ExecutionAddress); size != 20 {
err = ssz.ErrBytesLengthFn("--.ExecutionAddress", size, 20)
return
}
hh.PutBytes(b.ExecutionAddress)

// Field (3) 'Balance'
hh.PutUint64(uint64(b.Balance))

// Field (4) 'DepositEpoch'
hh.PutUint64(uint64(b.DepositEpoch))

// Field (5) 'WithdrawableEpoch'
hh.PutUint64(uint64(b.WithdrawableEpoch))

hh.Merkleize(indx)
return
}

@@ -26,10 +26,12 @@ func TestExecutionPayloadBid_Copy(t *testing.T) {
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.ValidatorIndex(42),
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitmentsRoot: []byte("blob_kzg_commitments_32_bytes!!"),
},
@@ -76,10 +78,9 @@ func TestBuilderPendingWithdrawal_Copy(t *testing.T) {
{
name: "fully populated withdrawal",
withdrawal: &BuilderPendingWithdrawal{
FeeRecipient: []byte("fee_recipient_20_byt"),
Amount: primitives.Gwei(5000000000),
BuilderIndex: primitives.ValidatorIndex(123),
WithdrawableEpoch: primitives.Epoch(456),
FeeRecipient: []byte("fee_recipient_20_byt"),
Amount: primitives.Gwei(5000000000),
BuilderIndex: primitives.BuilderIndex(123),
},
},
}
@@ -134,10 +135,9 @@ func TestBuilderPendingPayment_Copy(t *testing.T) {
payment: &BuilderPendingPayment{
Weight: primitives.Gwei(2500),
Withdrawal: &BuilderPendingWithdrawal{
FeeRecipient: []byte("test_recipient_20byt"),
Amount: primitives.Gwei(10000),
BuilderIndex: primitives.ValidatorIndex(789),
WithdrawableEpoch: primitives.Epoch(999),
FeeRecipient: []byte("test_recipient_20byt"),
Amount: primitives.Gwei(10000),
BuilderIndex: primitives.BuilderIndex(789),
},
},
},
@@ -166,3 +166,60 @@ func TestBuilderPendingPayment_Copy(t *testing.T) {
})
}
}

func TestCopyBuilder(t *testing.T) {
tests := []struct {
name string
builder *Builder
}{
{
name: "nil builder",
builder: nil,
},
{
name: "empty builder",
builder: &Builder{},
},
{
name: "fully populated builder",
builder: &Builder{
Pubkey: []byte("pubkey_48_bytes_long_pubkey_48_bytes_long_pubkey_48!"),
Version: []byte{'a'},
ExecutionAddress: []byte("execution_address_20"),
Balance: primitives.Gwei(12345),
DepositEpoch: primitives.Epoch(10),
WithdrawableEpoch: primitives.Epoch(20),
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
copied := CopyBuilder(tt.builder)
if tt.builder == nil {
if copied != nil {
t.Errorf("CopyBuilder() of nil should return nil, got %v", copied)
}
return
}

if !reflect.DeepEqual(tt.builder, copied) {
t.Errorf("CopyBuilder() = %v, want %v", copied, tt.builder)
}

if len(tt.builder.Pubkey) > 0 {
tt.builder.Pubkey[0] = 0xFF
if copied.Pubkey[0] == 0xFF {
t.Error("CopyBuilder() did not create deep copy of Pubkey")
}
}

if len(tt.builder.ExecutionAddress) > 0 {
tt.builder.ExecutionAddress[0] = 0xFF
if copied.ExecutionAddress[0] == 0xFF {
t.Error("CopyBuilder() did not create deep copy of ExecutionAddress")
}
}
})
}
}

@@ -48,6 +48,7 @@ mainnet = {
"payload_attestation.size": "4", # Gloas: MAX_PAYLOAD_ATTESTATIONS defined in block body
"execution_payload_availability.size": "1024", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "64", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
}

minimal = {
@@ -91,7 +92,8 @@ minimal = {
"ptc.type": "github.com/OffchainLabs/go-bitfield.Bitvector2",
"payload_attestation.size": "4", # Gloas: MAX_PAYLOAD_ATTESTATIONS defined in block body
"execution_payload_availability.size": "8", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "16" # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_pending_payments.size": "16", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
}

###### Rules definitions #######

@@ -1,4 +1,4 @@
version: v1.6.0
version: v1.7.0-alpha.0
style: full

specrefs:
@@ -57,6 +57,12 @@ exceptions:
- PAYLOAD_STATUS_EMPTY#gloas
- PAYLOAD_STATUS_FULL#gloas
- PAYLOAD_STATUS_PENDING#gloas
- ATTESTATION_TIMELINESS_INDEX#gloas
- BUILDER_INDEX_FLAG#gloas
- BUILDER_INDEX_SELF_BUILD#gloas
- DOMAIN_PROPOSER_PREFERENCES#gloas
- NUM_BLOCK_TIMELINESS_DEADLINES#gloas
- PTC_TIMELINESS_INDEX#gloas

configs:
# Not implemented (placeholders)
@@ -76,6 +82,7 @@ exceptions:
- MAX_REQUEST_PAYLOADS#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas

ssz_objects:
# Not implemented
@@ -103,6 +110,9 @@ exceptions:
- PayloadAttestationMessage#gloas
- SignedExecutionPayloadEnvelope#gloas
- SignedExecutionPayloadBid#gloas
- Builder#gloas
- ProposerPreferences#gloas
- SignedProposerPreferences#gloas

dataclasses:
# Not implemented
@@ -331,10 +341,8 @@ exceptions:
- get_ptc#gloas
- get_ptc_assignment#gloas
- get_weight#gloas
- has_builder_withdrawal_credential#gloas
- has_compounding_withdrawal_credential#gloas
- is_attestation_same_slot#gloas
- is_builder_payment_withdrawable#gloas
- is_builder_withdrawal_credential#gloas
- is_merge_transition_complete#gloas
- is_parent_block_full#gloas
@@ -358,7 +366,6 @@ exceptions:
- process_proposer_slashing#gloas
- process_slot#gloas
- process_withdrawals#gloas
- remove_flag#gloas
- should_extend_payload#gloas
- update_latest_messages#gloas
- upgrade_to_gloas#gloas
@@ -368,3 +375,55 @@ exceptions:
- verify_data_column_sidecar_inclusion_proof#gloas
- verify_execution_payload_envelope_signature#gloas
- verify_execution_payload_bid_signature#gloas
- add_builder_to_registry#gloas
- apply_deposit_for_builder#gloas
- apply_withdrawals#capella
- apply_withdrawals#gloas
- can_builder_cover_bid#gloas
- compute_proposer_score#phase0
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_pending_partial_withdrawals#electra
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- get_validators_sweep_withdrawals#capella
- get_validators_sweep_withdrawals#electra
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_proposer_equivocation#phase0
- is_valid_proposal_slot#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas
- update_next_withdrawal_index#capella
- update_next_withdrawal_validator_index#capella
- update_payload_expected_withdrawals#gloas
- update_pending_partial_withdrawals#electra
- update_proposer_boost_root#gloas
- update_proposer_boost_root#phase0

presets:
- CELLS_PER_EXT_BLOB#fulu
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- BUILDER_REGISTRY_LIMIT#gloas
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
- UPDATE_TIMEOUT#altair

@@ -304,16 +304,6 @@
GENESIS_SLOT: Slot = 0
</spec>

- name: INTERVALS_PER_SLOT
sources:
- file: config/params/config.go
search: IntervalsPerSlot\s+.*yaml:"INTERVALS_PER_SLOT"
regex: true
spec: |
<spec constant_var="INTERVALS_PER_SLOT" fork="phase0" hash="3352e419">
INTERVALS_PER_SLOT: uint64 = 3
</spec>

- name: JUSTIFICATION_BITS_LENGTH
sources:
- file: config/params/config.go

@@ -698,7 +698,7 @@
- name: compute_matrix
sources: []
spec: |
<spec fn="compute_matrix" fork="fulu" hash="b39370ca">
<spec fn="compute_matrix" fork="fulu" hash="0b88eac1">
def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]:
"""
Return the full, flattened sequence of matrix entries.
@@ -714,8 +714,8 @@
MatrixEntry(
cell=cell,
kzg_proof=proof,
row_index=blob_index,
column_index=cell_index,
row_index=blob_index,
)
)
return matrix
@@ -739,7 +739,7 @@
- file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_electra.go
search: func computeOnChainAggregate(
spec: |
<spec fn="compute_on_chain_aggregate" fork="electra" hash="128055d6">
<spec fn="compute_on_chain_aggregate" fork="electra" hash="f020af4c">
def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
aggregates = sorted(
network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0]
@@ -760,8 +760,8 @@
return Attestation(
aggregation_bits=aggregation_bits,
data=data,
committee_bits=committee_bits,
signature=signature,
committee_bits=committee_bits,
)
</spec>

@@ -2366,40 +2366,18 @@
- file: beacon-chain/state/state-native/getters_withdrawal.go
search: func (b *BeaconState) ExpectedWithdrawals(
spec: |
<spec fn="get_expected_withdrawals" fork="capella" hash="09191977">
def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
epoch = get_current_epoch(state)
<spec fn="get_expected_withdrawals" fork="capella" hash="d6a98c14">
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index
withdrawals: List[Withdrawal] = []
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
for _ in range(bound):
validator = state.validators[validator_index]
balance = state.balances[validator_index]
if is_fully_withdrawable_validator(validator, balance, epoch):
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance,
)
)
withdrawal_index += WithdrawalIndex(1)
elif is_partially_withdrawable_validator(validator, balance):
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance - MAX_EFFECTIVE_BALANCE,
)
)
withdrawal_index += WithdrawalIndex(1)
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
break
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
return withdrawals

# Get validators sweep withdrawals
validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
)
withdrawals.extend(validators_sweep_withdrawals)

return withdrawals, processed_validators_sweep_count
</spec>

- name: get_expected_withdrawals#electra
@@ -2407,80 +2385,26 @@
- file: beacon-chain/state/state-native/getters_withdrawal.go
search: func (b *BeaconState) ExpectedWithdrawals(
spec: |
<spec fn="get_expected_withdrawals" fork="electra" hash="060932cd">
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
epoch = get_current_epoch(state)
<spec fn="get_expected_withdrawals" fork="electra" hash="cfce862b">
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64, uint64]:
withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index
withdrawals: List[Withdrawal] = []
processed_partial_withdrawals_count = 0

# [New in Electra:EIP7251]
# Consume pending partial withdrawals
for withdrawal in state.pending_partial_withdrawals:
if (
withdrawal.withdrawable_epoch > epoch
or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP
):
break
# Get partial withdrawals
partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
)
withdrawals.extend(partial_withdrawals)

validator = state.validators[withdrawal.validator_index]
has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
total_withdrawn = sum(
w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index
)
balance = state.balances[withdrawal.validator_index] - total_withdrawn
has_excess_balance = balance > MIN_ACTIVATION_BALANCE
if (
validator.exit_epoch == FAR_FUTURE_EPOCH
and has_sufficient_effective_balance
and has_excess_balance
):
withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount)
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=withdrawal.validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=withdrawable_balance,
)
)
withdrawal_index += WithdrawalIndex(1)
# Get validators sweep withdrawals
validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
)
withdrawals.extend(validators_sweep_withdrawals)

processed_partial_withdrawals_count += 1

# Sweep for remaining.
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
for _ in range(bound):
validator = state.validators[validator_index]
# [Modified in Electra:EIP7251]
total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index)
balance = state.balances[validator_index] - total_withdrawn
if is_fully_withdrawable_validator(validator, balance, epoch):
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance,
)
)
withdrawal_index += WithdrawalIndex(1)
elif is_partially_withdrawable_validator(validator, balance):
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
# [Modified in Electra:EIP7251]
amount=balance - get_max_effective_balance(validator),
)
)
withdrawal_index += WithdrawalIndex(1)
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
break
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
return withdrawals, processed_partial_withdrawals_count
# [Modified in Electra:EIP7251]
return withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count
</spec>

- name: get_filtered_block_tree
@@ -3053,7 +2977,7 @@
- name: get_proposer_head
sources: []
spec: |
<spec fn="get_proposer_head" fork="phase0" hash="15d44290">
<spec fn="get_proposer_head" fork="phase0" hash="99e8fc05">
def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
head_block = store.blocks[head_root]
parent_root = head_block.parent_root
@@ -3084,7 +3008,10 @@
head_weak = is_head_weak(store, head_root)

# Check that the missing votes are assigned to the parent and not being hoarded.
parent_strong = is_parent_strong(store, parent_root)
parent_strong = is_parent_strong(store, head_root)

# Re-org more aggressively if there is a proposer equivocation in the previous slot.
proposer_equivocation = is_proposer_equivocation(store, head_root)

if all(
[
@@ -3100,6 +3027,8 @@
):
# We can re-org the current head by building upon its parent block.
return parent_root
elif all([head_weak, current_time_ok, proposer_equivocation]):
return parent_root
else:
return head_root
</spec>
@@ -3117,11 +3046,10 @@
- name: get_proposer_score
sources: []
spec: |
<spec fn="get_proposer_score" fork="phase0" hash="164b8de0">
<spec fn="get_proposer_score" fork="phase0" hash="2c8d8a27">
def get_proposer_score(store: Store) -> Gwei:
justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint]
committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH
return (committee_weight * PROPOSER_SCORE_BOOST) // 100
return compute_proposer_score(justified_checkpoint_state)
</spec>

- name: get_randao_mix
@@ -3509,26 +3437,10 @@
- file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
search: func (f *ForkChoice) Weight(
spec: |
<spec fn="get_weight" fork="phase0" hash="f2e4e8ef">
<spec fn="get_weight" fork="phase0" hash="b18bf25c">
def get_weight(store: Store, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
unslashed_and_active_indices = [
i
for i in get_active_validator_indices(state, get_current_epoch(state))
if not state.validators[i].slashed
]
attestation_score = Gwei(
sum(
state.validators[i].effective_balance
for i in unslashed_and_active_indices
if (
i in store.latest_messages
and i not in store.equivocating_indices
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot)
== root
)
)
)
attestation_score = get_attestation_score(store, root, state)
if store.proposer_boost_root == Root():
# Return only attestation score if ``proposer_boost_root`` is not set
return attestation_score
@@ -3615,7 +3527,7 @@
- file: beacon-chain/core/transition/state.go
search: func GenesisBeaconState(
spec: |
<spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="c69537d6">
<spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="d3a0ddd4">
def initialize_beacon_state_from_eth1(
eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit]
) -> BeaconState:
@@ -3627,7 +3539,7 @@
state = BeaconState(
genesis_time=eth1_timestamp + GENESIS_DELAY,
fork=fork,
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
eth1_data=Eth1Data(deposit_count=uint64(len(deposits)), block_hash=eth1_block_hash),
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
randao_mixes=[eth1_block_hash]
* EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
@@ -4162,10 +4074,11 @@
- name: is_parent_strong
sources: []
spec: |
<spec fn="is_parent_strong" fork="phase0" hash="e06641a8">
def is_parent_strong(store: Store, parent_root: Root) -> bool:
<spec fn="is_parent_strong" fork="phase0" hash="02a3fd0b">
def is_parent_strong(store: Store, root: Root) -> bool:
justified_state = store.checkpoint_states[store.justified_checkpoint]
parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD)
parent_root = store.blocks[root].parent_root
parent_weight = get_weight(store, parent_root)
return parent_weight > parent_threshold
</spec>
@@ -4683,7 +4596,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="phase0" hash="aff24b59">
|
||||
<spec fn="on_block" fork="phase0" hash="5f45947a">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
block = signed_block.message
|
||||
# Parent block must be known
|
||||
@@ -4713,19 +4626,8 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4739,7 +4641,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="bellatrix" hash="a3193d92">
|
||||
<spec fn="on_block" fork="bellatrix" hash="e81d01c3">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
@@ -4780,19 +4682,8 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
          store.proposer_boost_root = hash_tree_root(block)
      record_block_timeliness(store, block_root)
      update_proposer_boost_root(store, block_root)

      # Update checkpoints in store if necessary
      update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4806,7 +4697,7 @@
- file: beacon-chain/blockchain/receive_block.go
  search: func (s *Service) ReceiveBlock(
spec: |
  <spec fn="on_block" fork="capella" hash="560056ad">
  <spec fn="on_block" fork="capella" hash="7450531c">
  def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
      """
      Run ``on_block`` upon receiving a new block.
@@ -4839,19 +4730,8 @@
      # Add new state for this block to the store
      store.block_states[block_root] = state

      # Add block timeliness to the store
      seconds_since_genesis = store.time - store.genesis_time
      time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
      epoch = get_current_store_epoch(store)
      attestation_threshold_ms = get_attestation_due_ms(epoch)
      is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
      is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
      store.block_timeliness[hash_tree_root(block)] = is_timely

      # Add proposer score boost if the block is timely and not conflicting with an existing block
      is_first_block = store.proposer_boost_root == Root()
      if is_timely and is_first_block:
          store.proposer_boost_root = hash_tree_root(block)
      record_block_timeliness(store, block_root)
      update_proposer_boost_root(store, block_root)

      # Update checkpoints in store if necessary
      update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4865,7 +4745,7 @@
- file: beacon-chain/blockchain/receive_block.go
  search: func (s *Service) ReceiveBlock(
spec: |
  <spec fn="on_block" fork="deneb" hash="9565acee">
  <spec fn="on_block" fork="deneb" hash="bbad196e">
  def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
      """
      Run ``on_block`` upon receiving a new block.
@@ -4903,19 +4783,8 @@
      # Add new state for this block to the store
      store.block_states[block_root] = state

      # Add block timeliness to the store
      seconds_since_genesis = store.time - store.genesis_time
      time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
      epoch = get_current_store_epoch(store)
      attestation_threshold_ms = get_attestation_due_ms(epoch)
      is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
      is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
      store.block_timeliness[hash_tree_root(block)] = is_timely

      # Add proposer score boost if the block is timely and not conflicting with an existing block
      is_first_block = store.proposer_boost_root == Root()
      if is_timely and is_first_block:
          store.proposer_boost_root = hash_tree_root(block)
      record_block_timeliness(store, block_root)
      update_proposer_boost_root(store, block_root)

      # Update checkpoints in store if necessary
      update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4929,7 +4798,7 @@
- file: beacon-chain/blockchain/receive_block.go
  search: func (s *Service) ReceiveBlock(
spec: |
  <spec fn="on_block" fork="fulu" hash="4f955de9">
  <spec fn="on_block" fork="fulu" hash="b8f279b9">
  def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
      """
      Run ``on_block`` upon receiving a new block.
@@ -4967,19 +4836,8 @@
      # Add new state for this block to the store
      store.block_states[block_root] = state

      # Add block timeliness to the store
      seconds_since_genesis = store.time - store.genesis_time
      time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
      epoch = get_current_store_epoch(store)
      attestation_threshold_ms = get_attestation_due_ms(epoch)
      is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
      is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
      store.block_timeliness[hash_tree_root(block)] = is_timely

      # Add proposer score boost if the block is timely and not conflicting with an existing block
      is_first_block = store.proposer_boost_root == Root()
      if is_timely and is_first_block:
          store.proposer_boost_root = hash_tree_root(block)
      record_block_timeliness(store, block_root)
      update_proposer_boost_root(store, block_root)

      # Update checkpoints in store if necessary
      update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
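In each of these `on_block` variants the change is the same: the inline block-timeliness bookkeeping and proposer-boost assignment are replaced by the new `record_block_timeliness` and `update_proposer_boost_root` helpers. As a rough illustration of the timeliness rule those helpers encode, the sketch below (hypothetical Go, not Prysm's implementation; the slot duration and attestation deadline are assumed inputs) marks a block timely when it arrives during its own slot and before the attestation deadline:

package timeliness

import "time"

// IsTimely mirrors the spec's block-timeliness check: the block must be
// received during its own slot and before that slot's attestation deadline.
// slotDurationMs and attestationDueMs are assumed to come from the active
// fork's configuration.
func IsTimely(now, genesis time.Time, blockSlot, currentSlot, slotDurationMs, attestationDueMs uint64) bool {
    sinceGenesisMs := uint64(now.Sub(genesis).Milliseconds())
    timeIntoSlotMs := sinceGenesisMs % slotDurationMs
    beforeAttestingInterval := timeIntoSlotMs < attestationDueMs
    return currentSlot == blockSlot && beforeAttestingInterval
}

For example, with 12-second slots and a 4-second attestation deadline, a block for slot 2 received one second into slot 2 is timely, while the same block first seen during slot 3 is not.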
@@ -5074,7 +4932,7 @@
- name: prepare_execution_payload#capella
  sources: []
  spec: |
    <spec fn="prepare_execution_payload" fork="capella" hash="28db1590">
    <spec fn="prepare_execution_payload" fork="capella" hash="c258893e">
    def prepare_execution_payload(
        state: BeaconState,
        safe_block_hash: Hash32,
@@ -5087,12 +4945,15 @@
        parent_hash = state.latest_execution_payload_header.block_hash

        # Set the forkchoice head and initiate the payload build process
        # [New in Capella]
        withdrawals, _ = get_expected_withdrawals(state)

        payload_attributes = PayloadAttributes(
            timestamp=compute_time_at_slot(state, state.slot),
            prev_randao=get_randao_mix(state, get_current_epoch(state)),
            suggested_fee_recipient=suggested_fee_recipient,
            # [New in Capella]
            withdrawals=get_expected_withdrawals(state),
            withdrawals=withdrawals,
        )
        return execution_engine.notify_forkchoice_updated(
            head_block_hash=parent_hash,
@@ -5105,7 +4966,7 @@
- name: prepare_execution_payload#deneb
  sources: []
  spec: |
    <spec fn="prepare_execution_payload" fork="deneb" hash="f3387ec6">
    <spec fn="prepare_execution_payload" fork="deneb" hash="59f61f3a">
    def prepare_execution_payload(
        state: BeaconState,
        safe_block_hash: Hash32,
@@ -5117,11 +4978,13 @@
        parent_hash = state.latest_execution_payload_header.block_hash

        # Set the forkchoice head and initiate the payload build process
        withdrawals, _ = get_expected_withdrawals(state)

        payload_attributes = PayloadAttributes(
            timestamp=compute_time_at_slot(state, state.slot),
            prev_randao=get_randao_mix(state, get_current_epoch(state)),
            suggested_fee_recipient=suggested_fee_recipient,
            withdrawals=get_expected_withdrawals(state),
            withdrawals=withdrawals,
            # [New in Deneb:EIP4788]
            parent_beacon_block_root=hash_tree_root(state.latest_block_header),
        )
@@ -5136,7 +4999,7 @@
- name: prepare_execution_payload#electra
  sources: []
  spec: |
    <spec fn="prepare_execution_payload" fork="electra" hash="567b3739">
    <spec fn="prepare_execution_payload" fork="electra" hash="5414b883">
    def prepare_execution_payload(
        state: BeaconState,
        safe_block_hash: Hash32,
@@ -5149,7 +5012,7 @@

        # [Modified in EIP7251]
        # Set the forkchoice head and initiate the payload build process
        withdrawals, _ = get_expected_withdrawals(state)
        withdrawals, _, _ = get_expected_withdrawals(state)

        payload_attributes = PayloadAttributes(
            timestamp=compute_time_at_slot(state, state.slot),
@@ -5171,7 +5034,7 @@
- file: beacon-chain/core/blocks/attestation.go
  search: func ProcessAttestationNoVerifySignature(
spec: |
  <spec fn="process_attestation" fork="phase0" hash="6ac78cd0">
  <spec fn="process_attestation" fork="phase0" hash="d8e86aa9">
  def process_attestation(state: BeaconState, attestation: Attestation) -> None:
      data = attestation.data
      assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
@@ -5183,8 +5046,8 @@
      assert len(attestation.aggregation_bits) == len(committee)

      pending_attestation = PendingAttestation(
          data=data,
          aggregation_bits=attestation.aggregation_bits,
          data=data,
          inclusion_delay=state.slot - data.slot,
          proposer_index=get_beacon_proposer_index(state),
      )
@@ -6409,8 +6272,8 @@

- name: process_operations#electra
  sources:
    - file: beacon-chain/core/electra/transition_no_verify_sig.go
      search: func ProcessOperations(
    - file: beacon-chain/core/transition/electra.go
      search: func electraOperations(
  spec: |
    <spec fn="process_operations" fork="electra" hash="643f18b4">
    def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
@@ -7208,31 +7071,18 @@
- file: beacon-chain/core/blocks/withdrawals.go
  search: func ProcessWithdrawals(
spec: |
  <spec fn="process_withdrawals" fork="capella" hash="ed6a9c5a">
  <spec fn="process_withdrawals" fork="capella" hash="901f9fc4">
  def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
      expected_withdrawals = get_expected_withdrawals(state)
      assert payload.withdrawals == expected_withdrawals
      # Get expected withdrawals
      withdrawals, processed_validators_sweep_count = get_expected_withdrawals(state)
      assert payload.withdrawals == withdrawals

      for withdrawal in expected_withdrawals:
          decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
      # Apply expected withdrawals
      apply_withdrawals(state, withdrawals)

      # Update the next withdrawal index if this block contained withdrawals
      if len(expected_withdrawals) != 0:
          latest_withdrawal = expected_withdrawals[-1]
          state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)

      # Update the next validator index to start the next withdrawal sweep
      if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
          # Next sweep starts after the latest withdrawal's validator index
          next_validator_index = ValidatorIndex(
              (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
          )
          state.next_withdrawal_validator_index = next_validator_index
      else:
          # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
          next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
          next_validator_index = ValidatorIndex(next_index % len(state.validators))
          state.next_withdrawal_validator_index = next_validator_index
      # Update withdrawals fields in the state
      update_next_withdrawal_index(state, withdrawals)
      update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
  </spec>
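Both `process_withdrawals` rewrites (Capella here, Electra below) follow the same decomposition: balance decrements move into `apply_withdrawals`, and the next-index bookkeeping moves into `update_next_withdrawal_index` and `update_next_withdrawal_validator_index`, driven by the sweep counter that `get_expected_withdrawals` now returns. The Go sketch below only illustrates that split; the types and the sweep-update rule are assumptions, not Prysm's code:

package withdrawals

// Withdrawal is a minimal stand-in for the consensus withdrawal type.
type Withdrawal struct {
    Index          uint64
    ValidatorIndex uint64
    Amount         uint64
}

// ApplyWithdrawals debits each withdrawn amount, clamping at zero, in the
// spirit of the spec's apply_withdrawals helper.
func ApplyWithdrawals(balances []uint64, withdrawals []Withdrawal) {
    for _, w := range withdrawals {
        if w.Amount > balances[w.ValidatorIndex] {
            balances[w.ValidatorIndex] = 0
            continue
        }
        balances[w.ValidatorIndex] -= w.Amount
    }
}

// NextWithdrawalIndex advances the withdrawal index only if the block actually
// contained withdrawals, like update_next_withdrawal_index.
func NextWithdrawalIndex(current uint64, withdrawals []Withdrawal) uint64 {
    if len(withdrawals) == 0 {
        return current
    }
    return withdrawals[len(withdrawals)-1].Index + 1
}

// NextSweepValidatorIndex is one plausible reading of
// update_next_withdrawal_validator_index: advance the sweep cursor by the
// number of validators the sweep actually visited, wrapping around the
// registry. The real helper may differ in detail.
func NextSweepValidatorIndex(current, sweepCount, validatorCount uint64) uint64 {
    return (current + sweepCount) % validatorCount
}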
- name: process_withdrawals#electra
@@ -7240,39 +7090,23 @@
- file: beacon-chain/core/blocks/withdrawals.go
  search: func ProcessWithdrawals(
spec: |
  <spec fn="process_withdrawals" fork="electra" hash="dd99a91f">
  <spec fn="process_withdrawals" fork="electra" hash="67870972">
  def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
      # [Modified in Electra:EIP7251]
      expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state)
      # Get expected withdrawals
      withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count = (
          get_expected_withdrawals(state)
      )
      assert payload.withdrawals == withdrawals

      assert payload.withdrawals == expected_withdrawals

      for withdrawal in expected_withdrawals:
          decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
      # Apply expected withdrawals
      apply_withdrawals(state, withdrawals)

      # Update withdrawals fields in the state
      update_next_withdrawal_index(state, withdrawals)
      # [New in Electra:EIP7251]
      # Update pending partial withdrawals
      state.pending_partial_withdrawals = state.pending_partial_withdrawals[
          processed_partial_withdrawals_count:
      ]

      # Update the next withdrawal index if this block contained withdrawals
      if len(expected_withdrawals) != 0:
          latest_withdrawal = expected_withdrawals[-1]
          state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)

      # Update the next validator index to start the next withdrawal sweep
      if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
          # Next sweep starts after the latest withdrawal's validator index
          next_validator_index = ValidatorIndex(
              (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
          )
          state.next_withdrawal_validator_index = next_validator_index
      else:
          # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
          next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
          next_validator_index = ValidatorIndex(next_index % len(state.validators))
          state.next_withdrawal_validator_index = next_validator_index
      update_pending_partial_withdrawals(state, processed_partial_withdrawals_count)
      update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
  </spec>

- name: queue_excess_active_balance
@@ -7303,7 +7137,7 @@
- name: recover_matrix
  sources: []
  spec: |
    <spec fn="recover_matrix" fork="fulu" hash="9b01f005">
    <spec fn="recover_matrix" fork="fulu" hash="3db21f50">
    def recover_matrix(
        partial_matrix: Sequence[MatrixEntry], blob_count: uint64
    ) -> Sequence[MatrixEntry]:
@@ -7323,8 +7157,8 @@
                    MatrixEntry(
                        cell=cell,
                        kzg_proof=proof,
                        row_index=blob_index,
                        column_index=cell_index,
                        row_index=blob_index,
                    )
                )
        return matrix
@@ -7373,7 +7207,7 @@
- file: beacon-chain/forkchoice/ro.go
  search: func (ro *ROForkChoice) ShouldOverrideFCU(
spec: |
  <spec fn="should_override_forkchoice_update" fork="bellatrix" hash="9a8043af">
  <spec fn="should_override_forkchoice_update" fork="bellatrix" hash="c055d92a">
  def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
      head_block = store.blocks[head_root]
      parent_root = head_block.parent_root
@@ -7414,7 +7248,7 @@
      # `store.time` early, or by counting queued attestations during the head block's slot.
      if current_slot > head_block.slot:
          head_weak = is_head_weak(store, head_root)
          parent_strong = is_parent_strong(store, parent_root)
          parent_strong = is_parent_strong(store, head_root)
      else:
          head_weak = True
          parent_strong = True
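In `should_override_forkchoice_update`, the only change is the argument to `is_parent_strong`, which now takes the head root and resolves the parent internally. For orientation, the weak-head / strong-parent branch shown in this hunk reduces to the following illustrative Go sketch (helper inputs are assumed; the full spec predicate also checks timing, shuffling stability, FFG competitiveness, and finalization):

package proposerreorg

// ShouldConsiderReorg mirrors only the branch shown in the hunk above: before
// the head block's slot has passed there is no attestation weight to judge, so
// both flags default to true; afterwards the head must be weak and its parent
// strong for a proposer reorg to be worth attempting.
func ShouldConsiderReorg(currentSlot, headSlot uint64, headWeak, parentStrong func() bool) bool {
    weak, strong := true, true
    if currentSlot > headSlot {
        weak = headWeak()
        strong = parentStrong()
    }
    return weak && strong
}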
@@ -8,6 +8,7 @@ import (
    "runtime"
    "sort"
    "strings"
    "time"

    "github.com/OffchainLabs/prysm/v7/encoding/ssz/equality"
    "github.com/d4l3k/messagediff"
@@ -138,12 +139,21 @@ func StringContains(loggerFn assertionLoggerFn, expected, actual string, flag bo

// NoError asserts that error is nil.
func NoError(loggerFn assertionLoggerFn, err error, msg ...any) {
    // reflect.ValueOf is needed for nil instances of custom types implementing Error
    if err != nil && !reflect.ValueOf(err).IsNil() {
        errMsg := parseMsg("Unexpected error", msg...)
        _, file, line, _ := runtime.Caller(2)
        loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err)
    if err == nil {
        return
    }
    // reflect.ValueOf is needed for nil instances of custom types implementing Error.
    // Only check IsNil for types that support it to avoid panics on struct types.
    v := reflect.ValueOf(err)
    switch v.Kind() {
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
        if v.IsNil() {
            return
        }
    }
    errMsg := parseMsg("Unexpected error", msg...)
    _, file, line, _ := runtime.Caller(2)
    loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err)
}

// ErrorIs uses Errors.Is to recursively unwrap err looking for target in the chain.
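The `NoError` rework addresses a latent panic: `reflect.Value.IsNil` may only be called on nilable kinds, so the old `err != nil && !reflect.ValueOf(err).IsNil()` guard would panic if an error were ever implemented on a struct value rather than a pointer. A hypothetical example of the failure mode the new `Kind` switch guards against:

package main

import (
    "fmt"
    "reflect"
)

// statusErr is a hypothetical error type with a value receiver, so a statusErr
// stored in an error interface has reflect kind Struct.
type statusErr struct{ code int }

func (e statusErr) Error() string { return fmt.Sprintf("status %d", e.code) }

func main() {
    var err error = statusErr{code: 503}

    v := reflect.ValueOf(err)
    fmt.Println(v.Kind()) // struct

    // Calling v.IsNil() here would panic ("call of reflect.Value.IsNil on
    // struct Value"). The updated NoError only calls IsNil for nilable kinds,
    // so struct-valued errors are reported as failures instead of panicking,
    // while typed-nil pointers inside the interface are still treated as nil.
    switch v.Kind() {
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
        reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
        fmt.Println("nilable kind, safe to call IsNil:", v.IsNil())
    default:
        fmt.Println("non-nilable kind, treat as a real error:", err)
    }
}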
@@ -341,3 +351,18 @@ func (tb *TBMock) Errorf(format string, args ...any) {
func (tb *TBMock) Fatalf(format string, args ...any) {
    tb.FatalfMsg = fmt.Sprintf(format, args...)
}

// Eventually asserts that given condition will be met within waitFor time,
// periodically checking target function each tick.
func Eventually(loggerFn assertionLoggerFn, condition func() bool, waitFor, tick time.Duration, msg ...any) {
    deadline := time.Now().Add(waitFor)
    for time.Now().Before(deadline) {
        if condition() {
            return
        }
        time.Sleep(tick)
    }
    errMsg := parseMsg("Condition never satisfied", msg...)
    _, file, line, _ := runtime.Caller(2)
    loggerFn("%s:%d %s (waited %v)", filepath.Base(file), line, errMsg, waitFor)
}

@@ -1,6 +1,8 @@
package require

import (
    "time"

    "github.com/OffchainLabs/prysm/v7/testing/assertions"
    "github.com/sirupsen/logrus/hooks/test"
)
@@ -87,3 +89,9 @@ func ErrorIs(tb assertions.AssertionTestingTB, err, target error, msg ...any) {
func StringContains(tb assertions.AssertionTestingTB, expected, actual string, msg ...any) {
    assertions.StringContains(tb.Fatalf, expected, actual, true, msg)
}

// Eventually asserts that given condition will be met within waitFor time,
// periodically checking target function each tick.
func Eventually(tb assertions.AssertionTestingTB, condition func() bool, waitFor, tick time.Duration, msg ...any) {
    assertions.Eventually(tb.Fatalf, condition, waitFor, tick, msg...)
}
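`require.Eventually` wraps the new assertions helper so tests can poll for an asynchronous condition instead of sleeping for a fixed duration. A small self-contained usage sketch (the goroutine stands in for whatever component the test is really waiting on):

package example_test

import (
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestServiceBecomesReady(t *testing.T) {
    ready := make(chan struct{})
    go func() {
        time.Sleep(200 * time.Millisecond) // stand-in for real asynchronous work
        close(ready)
    }()

    // Poll every 50ms for up to 5s; if the deadline passes first, the test
    // fails with "service never became ready" plus the caller's file:line.
    require.Eventually(t, func() bool {
        select {
        case <-ready:
            return true
        default:
            return false
        }
    }, 5*time.Second, 50*time.Millisecond, "service never became ready")
}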
@@ -62,8 +62,17 @@ func runTest(t *testing.T, config string, fork int, basePath string) { // nolint
    if len(testFolders) == 0 {
        t.Fatalf("No test folders found for %s/%s/%s", config, version.String(fork), folderPath)
    }

    var skipTests = map[string]bool{
        // Skipping because of #4807 backporting issues
        "voting_source_beyond_two_epoch":         true,
        "justified_update_always_if_better":      true,
        "justified_update_not_realized_finality": true,
    }
    for _, folder := range testFolders {
        if skipTests[folder.Name()] {
            t.Logf("Skipping test %s due to known issues", folder.Name())
            continue
        }
        t.Run(folder.Name(), func(t *testing.T) {
            helpers.ClearCache()
            preStepsFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "steps.yaml")
Some files were not shown because too many files have changed in this diff.