Compare commits


1 Commit

Author SHA1 Message Date
Manu NALEPA  f7b532e958  Add automatic heap dump monitoring based on memory threshold  2026-01-12 13:24:09 +01:00
226 changed files with 1931 additions and 8174 deletions

View File

@@ -1,4 +1,5 @@
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
@@ -54,6 +55,15 @@ alias(
visibility = ["//visibility:public"],
)
gometalinter(
name = "gometalinter",
config = "//:.gometalinter.json",
paths = [
"./...",
],
prefix = prefix,
)
goimports(
name = "goimports",
display_diffs = True,

View File

@@ -72,7 +72,7 @@ Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should li
To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://pkg.go.dev/go/build#hdr-Build_Constraints)).
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in the extra build tag, e.g.:
```bash
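# Hypothetical example: the tag name below is an assumption, not the repo's
# documented value; substitute the build tag your test target expects.
go test -tags=develop ./beacon-chain/core/...
```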

View File

@@ -9,7 +9,7 @@ This README details how to set up Prysm for interop testing for usage with other
## Installation & Setup
1. Install [Bazel](https://bazel.build/install) **(Recommended)**
1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
2. `git clone https://github.com/OffchainLabs/prysm && cd prysm`
3. `bazel build //cmd/...`

View File

@@ -15,7 +15,7 @@
## 📖 Overview
This is the core repository for Prysm, a [Golang](https://go.dev/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
@@ -23,7 +23,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details
## 🚀 Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://prysm.offchainlabs.com/docs/)**.
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.
💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.
@@ -51,7 +51,7 @@ Prysm maintains two permanent branches:
### 🛠 Contribution Guide
Want to get involved? Check out our **[Contribution Guide](https://prysm.offchainlabs.com/docs/contribute/contribution-guidelines/)** to learn more!
Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!
---

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.1"
consensus_spec_version = "v1.7.0-alpha.0"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
"general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
"minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
"mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -423,6 +423,10 @@ load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")
e2e_deps()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies(go_sdk = "go_sdk")

View File

@@ -972,224 +972,3 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
ProposerLookahead: lookahead,
}, nil
}
// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
srcLookahead, err := st.ProposerLookahead()
if err != nil {
return nil, err
}
lookahead := make([]string, len(srcLookahead))
for i, v := range srcLookahead {
lookahead[i] = fmt.Sprintf("%d", uint64(v))
}
// Fields new in Gloas:
lepb, err := st.LatestExecutionPayloadBid()
if err != nil {
return nil, err
}
builders, err := st.Builders()
if err != nil {
return nil, err
}
nwbi, err := st.NextWithdrawalBuilderIndex()
if err != nil {
return nil, err
}
epa, err := st.ExecutionPayloadAvailability()
if err != nil {
return nil, err
}
bpp, err := st.BuilderPendingPayments()
if err != nil {
return nil, err
}
bpw, err := st.BuilderPendingWithdrawals()
if err != nil {
return nil, err
}
lbh, err := st.LatestBlockHash()
if err != nil {
return nil, err
}
pew, err := st.PayloadExpectedWithdrawals()
if err != nil {
return nil, err
}
return &BeaconStateGloas{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
ProposerLookahead: lookahead,
LatestExecutionPayloadBid: LatestExecutionPayloadBidFromConsensus(lepb),
Builders: BuildersFromConsensus(builders),
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
ExecutionPayloadAvailability: epa,
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
LatestBlockHash: hexutil.Encode(lbh[:]),
PayloadExpectedWithdrawals: PayloadExpectedWithdrawalsFromConsensus(pew),
}, nil
}

View File

@@ -262,37 +262,3 @@ type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKZGCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type Builder struct {
Pubkey string `json:"pubkey"`
Version string `json:"version"`
ExecutionAddress string `json:"execution_address"`
Balance string `json:"balance"`
DepositEpoch string `json:"deposit_epoch"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type BuilderPendingPayment struct {
Weight string `json:"weight"`
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
}
type BuilderPendingWithdrawal struct {
FeeRecipient string `json:"fee_recipient"`
Amount string `json:"amount"`
BuilderIndex string `json:"builder_index"`
}
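As a quick illustration of the wire format these declarations imply (a minimal, self-contained sketch; the literal values are invented), marshaling a `BuilderPendingWithdrawal` produces the snake_case keys from the struct tags, with numeric fields rendered as decimal strings per the convention used throughout this file:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the BuilderPendingWithdrawal declaration above.
type BuilderPendingWithdrawal struct {
	FeeRecipient string `json:"fee_recipient"`
	Amount       string `json:"amount"`
	BuilderIndex string `json:"builder_index"`
}

func main() {
	w := BuilderPendingWithdrawal{
		FeeRecipient: "0x00000000000000000000000000000000000000aa", // invented address
		Amount:       "32000000000",                                // Gwei, as a decimal string
		BuilderIndex: "7",
	}
	out, _ := json.MarshalIndent(w, "", "  ")
	fmt.Println(string(out))
	// {
	//   "fee_recipient": "0x00000000000000000000000000000000000000aa",
	//   "amount": "32000000000",
	//   "builder_index": "7"
	// }
}
```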

View File

@@ -221,51 +221,3 @@ type BeaconStateFulu struct {
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
}
type BeaconStateGloas struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
Builders []*Builder `json:"builders"`
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
LatestBlockHash string `json:"latest_block_hash"`
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
}

View File

@@ -74,6 +74,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -173,6 +174,7 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -13,7 +13,7 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844_v2//bindings/go:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -221,19 +221,6 @@ var (
Buckets: []float64{1, 2, 4, 8, 16, 32},
},
)
commitmentCount = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "commitment_count_max_21",
Help: "The number of blob KZG commitments per block.",
Buckets: []float64{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21},
},
)
maxBlobsPerBlock = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "max_blobs_per_block",
Help: "The maximum number of blobs allowed in a block.",
},
)
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -94,7 +94,6 @@ func (s *Service) spawnProcessAttestationsRoutine() {
for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/slasher/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -259,55 +258,32 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, blo
}
func (s *Service) reportPostBlockProcessing(
signedBlock interfaces.SignedBeaconBlock,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
receivedTime time.Time,
daWaitedTime time.Duration,
) {
block := signedBlock.Block()
if block == nil {
log.WithField("blockRoot", blockRoot).Error("Nil block")
return
}
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(block.Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(block, blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(block); err != nil {
if err := logPayload(block.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(block); err != nil {
if err := logStateTransitionData(block.Block()); err != nil {
log.WithError(err).Error("Unable to log state transition data")
}
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
body := block.Body()
if body == nil {
log.WithField("blockRoot", blockRoot).Error("Nil block body")
return
}
commitments, err := body.BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Unable to get blob KZG commitments")
}
commitmentCount.Observe(float64(len(commitments)))
maxBlobsPerBlock.Set(float64(params.BeaconConfig().MaxBlobsPerBlock(block.Slot())))
}
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -29,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -289,6 +291,19 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "failed to initialize blockchain service")
}
if !params.FuluEnabled() {
return nil
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
return nil
}
@@ -453,6 +468,73 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement
// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
}
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}
if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a supernode.", flags.Supernode.Name)
}
return earliestAvailableSlot, actualCustodyGroupCount, nil
}
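Condensed to its core, the target-count selection above follows this precedence, with the semi-supernode branch evaluated last (a sketch with stand-in values; `reconstructMin` stands in for `peerdas.MinimumCustodyGroupCountToReconstruct`):

```go
package main

import "fmt"

// targetCustodyGroups mirrors the selection logic in updateCustodyInfoInDB.
func targetCustodyGroups(requirement, totalGroups, reconstructMin uint64, supernode, semiSupernode bool) uint64 {
	target := requirement
	if supernode {
		target = totalGroups // custody everything
	}
	if semiSupernode {
		target = max(requirement, reconstructMin)
	}
	return target
}

func main() {
	fmt.Println(targetCustodyGroups(4, 64, 32, false, false)) // 4
	fmt.Println(targetCustodyGroups(4, 64, 32, true, false))  // 64
	fmt.Println(targetCustodyGroups(4, 64, 32, false, true))  // 32
}
```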
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -469,3 +551,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}
func fuluForkSlot() (primitives.Slot, error) {
cfg := params.BeaconConfig()
fuluForkEpoch := cfg.FuluForkEpoch
if fuluForkEpoch == cfg.FarFutureEpoch {
return cfg.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
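For intuition on the epoch-to-slot conversion (a sketch assuming 32 slots per epoch, the mainnet value): a Fulu fork epoch of 10 begins at slot 320, which is why the tests below probe slots like `fuluForkEpoch*SlotsPerEpoch + 1`.

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; assumed for this sketch

// epochStart mirrors slots.EpochStart for the non-overflowing case.
func epochStart(epoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	fmt.Println(epochStart(10)) // 320: the first slot of the fork epoch
}
```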

View File

@@ -23,9 +23,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -594,3 +596,218 @@ func TestNotifyIndex(t *testing.T) {
t.Errorf("Notifier channel did not receive the index")
}
}
func TestUpdateCustodyInfoInDB(t *testing.T) {
const (
fuluForkEpoch = 10
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
pbBlock := util.NewBeaconBlock()
pbBlock.Block.Slot = 12
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
t.Run("CGC increases before fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// After Fulu
// ----------
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("CGC increases after fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
// After Fulu
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 32 due to downgrade prevention by UpdateCustodyInfo
})
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 64
})
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Mock a high custody requirement (simulating many validators)
// We need to override the custody requirement calculation
// For this test, we'll verify the logic by checking if custodyRequirement > 32
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
// This would require a different test setup with actual validators
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
// With low validator requirements (4), should use the semi-supernode minimum (32)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}

View File

@@ -21,12 +21,12 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -12,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
@@ -128,16 +126,7 @@ func processProposerSlashing(
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
var err error
// [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within the 2-epoch window
if beaconState.Version() >= version.Gloas {
err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
if err != nil {
return nil, err
}
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -212,7 +213,12 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
if err != nil {
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
if features.Get().LowValcountSweep {
bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex += primitives.ValidatorIndex(bound)
} else {
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
}
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
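To make the `LowValcountSweep` change concrete (a sketch with invented numbers, not Prysm's API): capping the advance at the validator count means that when the whole set fits in one sweep, the start index completes exactly one lap and resumes in place, instead of landing at an arbitrary offset.

```go
package main

import "fmt"

// nextSweepStart mirrors the index arithmetic from ProcessWithdrawals;
// maxSweep stands in for MaxValidatorsPerWithdrawalsSweep.
func nextSweepStart(start, numValidators, maxSweep uint64, lowValcountSweep bool) uint64 {
	advance := maxSweep
	if lowValcountSweep {
		advance = min(numValidators, maxSweep) // never advance more than one full lap
	}
	return (start + advance) % numValidators
}

func main() {
	// 100 validators, sweep cap 16384: the capped advance is one full lap,
	// so the next sweep resumes where this one started.
	fmt.Println(nextSweepStart(42, 100, 16384, true))  // 42
	fmt.Println(nextSweepStart(42, 100, 16384, false)) // (42+16384)%100 = 26
}
```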

View File

@@ -12,6 +12,7 @@ go_library(
"log.go",
"registry_updates.go",
"transition.go",
"transition_no_verify_sig.go",
"upgrade.go",
"validator.go",
"withdrawals.go",
@@ -61,6 +62,7 @@ go_test(
"error_test.go",
"export_test.go",
"registry_updates_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
"upgrade_test.go",
"validator_test.go",

View File

@@ -6,11 +6,6 @@ type execReqErr struct {
error
}
// NewExecReqError creates a new execReqErr.
func NewExecReqError(msg string) error {
return execReqErr{errors.New(msg)}
}
// IsExecutionRequestError returns true if the error has `execReqErr`.
func IsExecutionRequestError(e error) bool {
if e == nil {

View File

@@ -1,10 +1,9 @@
package transition
package electra
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -48,7 +47,7 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
// 6110 validations are in VerifyOperationLengths
@@ -64,60 +63,59 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attestation")
}
if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
st, err = blocks.ProcessBLSToExecutionChanges(st, block)
st, err = ProcessBLSToExecutionChanges(st, block)
if err != nil {
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
return nil, errors.Wrap(err, "could not process bls-to-execution changes")
}
// new in electra
requests, err := bb.ExecutionRequests()
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
return nil, errors.Wrap(err, "could not get execution requests")
}
for _, d := range requests.Deposits {
if d == nil {
return nil, electra.NewExecReqError("nil deposit request")
return nil, errors.New("nil deposit request")
}
}
st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
}
for _, w := range requests.Withdrawals {
if w == nil {
return nil, electra.NewExecReqError("nil withdrawal request")
return nil, errors.New("nil withdrawal request")
}
}
st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
}
for _, c := range requests.Consolidations {
if c == nil {
return nil, electra.NewExecReqError("nil consolidation request")
return nil, errors.New("nil consolidation request")
}
}
if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
}
return st, nil
}

View File

@@ -0,0 +1,60 @@
package electra_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestProcessOperationsWithNilRequests(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
errMsg string
}{
{
name: "Nil deposit request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
},
errMsg: "nil deposit request",
},
{
name: "Nil withdrawal request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
},
errMsg: "nil withdrawal request",
},
{
name: "Nil consolidation request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
},
errMsg: "nil consolidation request",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
_, err = electra.ProcessOperations(t.Context(), st, b.Block())
require.ErrorContains(t, tc.errMsg, err)
})
}
}

View File

@@ -1,57 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bid.go",
"pending_payment.go",
"proposer_slashing.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"bid_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,193 +0,0 @@
package gloas
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "failed to get signed execution payload bid")
}
wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wrappedBid.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
builderIndex := bid.BuilderIndex()
amount := bid.Value()
if builderIndex == params.BeaconConfig().BuilderIndexSelfBuild {
if amount != 0 {
return fmt.Errorf("self-build amount must be zero, got %d", amount)
}
if wrappedBid.Signature() != common.InfiniteSignature {
return errors.New("self-build signature must be point at infinity")
}
} else {
ok, err := st.IsActiveBuilder(builderIndex)
if err != nil {
return errors.Wrap(err, "builder active check failed")
}
if !ok {
return fmt.Errorf("builder %d is not active", builderIndex)
}
ok, err = st.CanBuilderCoverBid(builderIndex, amount)
if err != nil {
return errors.Wrap(err, "builder balance check failed")
}
if !ok {
return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
}
if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
return errors.Wrap(err, "bid signature validation failed")
}
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}
if amount > 0 {
feeRecipient := bid.FeeRecipient()
pendingPayment := &ethpb.BuilderPendingPayment{
Weight: 0,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: feeRecipient[:],
Amount: amount,
BuilderIndex: builderIndex,
},
}
slotIndex := params.BeaconConfig().SlotsPerEpoch + (bid.Slot() % params.BeaconConfig().SlotsPerEpoch)
if err := st.SetBuilderPendingPayment(slotIndex, pendingPayment); err != nil {
return errors.Wrap(err, "failed to set pending payment")
}
}
if err := st.SetExecutionPayloadBid(bid); err != nil {
return errors.Wrap(err, "failed to cache execution payload bid")
}
return nil
}
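The `slotIndex` arithmetic above is worth a worked example (a sketch assuming 32 slots per epoch, the mainnet value): `builder_pending_payments` spans two epochs of slots, and bids for the current epoch land in the second half, so a bid at slot 12 maps to index 32 + (12 % 32) = 44.

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; assumed for this sketch

// pendingPaymentIndex mirrors the slotIndex computation in
// ProcessExecutionPayloadBid: entries for the current epoch occupy the
// second half of the two-epoch builder_pending_payments vector.
func pendingPaymentIndex(slot uint64) uint64 {
	return slotsPerEpoch + slot%slotsPerEpoch
}

func main() {
	fmt.Println(pendingPaymentIndex(12)) // 44
	fmt.Println(pendingPaymentIndex(45)) // 32 + 13 = 45
}
```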
// validateBidConsistency checks that the bid is consistent with the current beacon state.
func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayloadBid, block interfaces.ReadOnlyBeaconBlock) error {
if bid.Slot() != block.Slot() {
return fmt.Errorf("bid slot %d does not match block slot %d", bid.Slot(), block.Slot())
}
latestBlockHash, err := st.LatestBlockHash()
if err != nil {
return errors.Wrap(err, "failed to get latest block hash")
}
if bid.ParentBlockHash() != latestBlockHash {
return fmt.Errorf("bid parent block hash mismatch: got %x, expected %x",
bid.ParentBlockHash(), latestBlockHash)
}
if bid.ParentBlockRoot() != block.ParentRoot() {
return fmt.Errorf("bid parent block root mismatch: got %x, expected %x",
bid.ParentBlockRoot(), block.ParentRoot())
}
randaoMix, err := helpers.RandaoMix(st, slots.ToEpoch(st.Slot()))
if err != nil {
return errors.Wrap(err, "failed to get randao mix")
}
if bid.PrevRandao() != [32]byte(randaoMix) {
return fmt.Errorf("bid prev randao mismatch: got %x, expected %x", bid.PrevRandao(), randaoMix)
}
return nil
}
// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// It validates that the signature was created by the builder specified in the bid
// using the appropriate domain for the beacon builder.
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
bid, err := signedBid.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid")
}
pubkey, err := st.BuilderPubkey(bid.BuilderIndex())
if err != nil {
return errors.Wrap(err, "failed to get builder pubkey")
}
publicKey, err := bls.PublicKeyFromBytes(pubkey[:])
if err != nil {
return errors.Wrap(err, "invalid builder public key")
}
signatureBytes := signedBid.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return errors.Wrap(err, "invalid signature format")
}
currentEpoch := slots.ToEpoch(bid.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return errors.Wrap(err, "failed to compute signing domain")
}
signingRoot, err := signedBid.SigningRoot(domain)
if err != nil {
return errors.Wrap(err, "failed to compute signing root")
}
if !signature.Verify(publicKey, signingRoot[:]) {
return signing.ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,633 +0,0 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
validatorpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/validator-client"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
fastssz "github.com/prysmaticlabs/fastssz"
"google.golang.org/protobuf/proto"
)
type stubBlockBody struct {
signedBid *ethpb.SignedExecutionPayloadBid
}
func (s stubBlockBody) Version() int { return version.Gloas }
func (s stubBlockBody) RandaoReveal() [96]byte { return [96]byte{} }
func (s stubBlockBody) Eth1Data() *ethpb.Eth1Data { return nil }
func (s stubBlockBody) Graffiti() [32]byte { return [32]byte{} }
func (s stubBlockBody) ProposerSlashings() []*ethpb.ProposerSlashing { return nil }
func (s stubBlockBody) AttesterSlashings() []ethpb.AttSlashing { return nil }
func (s stubBlockBody) Attestations() []ethpb.Att { return nil }
func (s stubBlockBody) Deposits() []*ethpb.Deposit { return nil }
func (s stubBlockBody) VoluntaryExits() []*ethpb.SignedVoluntaryExit { return nil }
func (s stubBlockBody) SyncAggregate() (*ethpb.SyncAggregate, error) { return nil, nil }
func (s stubBlockBody) IsNil() bool { return s.signedBid == nil }
func (s stubBlockBody) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
func (s stubBlockBody) Proto() (proto.Message, error) { return nil, nil }
func (s stubBlockBody) Execution() (interfaces.ExecutionData, error) { return nil, nil }
func (s stubBlockBody) BLSToExecutionChanges() ([]*ethpb.SignedBLSToExecutionChange, error) {
return nil, nil
}
func (s stubBlockBody) BlobKzgCommitments() ([][]byte, error) { return nil, nil }
func (s stubBlockBody) ExecutionRequests() (*enginev1.ExecutionRequests, error) {
return nil, nil
}
func (s stubBlockBody) PayloadAttestations() ([]*ethpb.PayloadAttestation, error) {
return nil, nil
}
func (s stubBlockBody) SignedExecutionPayloadBid() (*ethpb.SignedExecutionPayloadBid, error) {
return s.signedBid, nil
}
func (s stubBlockBody) MarshalSSZ() ([]byte, error) { return nil, nil }
func (s stubBlockBody) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
func (s stubBlockBody) UnmarshalSSZ([]byte) error { return nil }
func (s stubBlockBody) SizeSSZ() int { return 0 }
type stubBlock struct {
slot primitives.Slot
proposer primitives.ValidatorIndex
parentRoot [32]byte
body stubBlockBody
v int
}
var (
_ interfaces.ReadOnlyBeaconBlockBody = (*stubBlockBody)(nil)
_ interfaces.ReadOnlyBeaconBlock = (*stubBlock)(nil)
)
func (s stubBlock) Slot() primitives.Slot { return s.slot }
func (s stubBlock) ProposerIndex() primitives.ValidatorIndex { return s.proposer }
func (s stubBlock) ParentRoot() [32]byte { return s.parentRoot }
func (s stubBlock) StateRoot() [32]byte { return [32]byte{} }
func (s stubBlock) Body() interfaces.ReadOnlyBeaconBlockBody { return s.body }
func (s stubBlock) IsNil() bool { return false }
func (s stubBlock) IsBlinded() bool { return false }
func (s stubBlock) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
func (s stubBlock) Proto() (proto.Message, error) { return nil, nil }
func (s stubBlock) MarshalSSZ() ([]byte, error) { return nil, nil }
func (s stubBlock) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
func (s stubBlock) UnmarshalSSZ([]byte) error { return nil }
func (s stubBlock) SizeSSZ() int { return 0 }
func (s stubBlock) Version() int { return s.v }
func (s stubBlock) AsSignRequestObject() (validatorpb.SignRequestObject, error) {
return nil, nil
}
func (s stubBlock) HashTreeRootWith(*fastssz.Hasher) error { return nil }
func buildGloasState(t *testing.T, slot primitives.Slot, proposerIdx primitives.ValidatorIndex, builderIdx primitives.BuilderIndex, balance uint64, randao [32]byte, latestHash [32]byte, builderPubkey [48]byte) *state_native.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = bytes.Repeat([]byte{0xAA}, 32)
stateRoots[i] = bytes.Repeat([]byte{0xBB}, 32)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = randao[:]
}
withdrawalCreds := make([]byte, 32)
withdrawalCreds[0] = cfg.BuilderWithdrawalPrefixByte
validatorCount := int(proposerIdx) + 1
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := range validatorCount {
validators[i] = &ethpb.Validator{
PublicKey: builderPubkey[:],
WithdrawalCredentials: withdrawalCreds,
EffectiveBalance: balance,
Slashed: false,
ActivationEligibilityEpoch: 0,
ActivationEpoch: 0,
ExitEpoch: cfg.FarFutureEpoch,
WithdrawableEpoch: cfg.FarFutureEpoch,
}
balances[i] = balance
}
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
for i := range payments {
payments[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{}}
}
var builders []*ethpb.Builder
if builderIdx != params.BeaconConfig().BuilderIndexSelfBuild {
builderCount := int(builderIdx) + 1
builders = make([]*ethpb.Builder, builderCount)
builders[builderCount-1] = &ethpb.Builder{
Pubkey: builderPubkey[:],
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x01}, 20),
Balance: primitives.Gwei(balance),
DepositEpoch: 0,
WithdrawableEpoch: cfg.FarFutureEpoch,
}
}
stProto := &ethpb.BeaconStateGloas{
Slot: slot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, 32),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x22}, 4),
PreviousVersion: bytes.Repeat([]byte{0x22}, 4),
Epoch: 0,
},
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
Validators: validators,
Balances: balances,
LatestBlockHash: latestHash[:],
BuilderPendingPayments: payments,
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
Builders: builders,
FinalizedCheckpoint: &ethpb.Checkpoint{
Epoch: 1,
},
}
st, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return st.(*state_native.BeaconState)
}
func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid, fork *ethpb.Fork, genesisRoot [32]byte) [96]byte {
t.Helper()
epoch := slots.ToEpoch(primitives.Slot(bid.Slot))
domain, err := signing.Domain(fork, epoch, params.BeaconConfig().DomainBeaconBuilder, genesisRoot[:])
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sig := sk.Sign(root[:]).Marshal()
var out [96]byte
copy(out[:], sig)
return out
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
require.NoError(t, ProcessExecutionPayloadBid(state, block))
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
require.Equal(t, primitives.Gwei(0), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
}
func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
slot := primitives.Slot(2)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte{}
latestHash := [32]byte{1}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "self-build amount must be zero", err)
}
func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
slot := primitives.Slot(8)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
pub := sk.PublicKey().Marshal()
var pubKey [48]byte
copy(pubKey[:], pub)
balance := params.BeaconConfig().MinActivationBalance + 1_000_000
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: sig[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx, // not self-build
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
require.NoError(t, ProcessExecutionPayloadBid(state, block))
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
require.Equal(t, primitives.Gwei(500_000), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
require.NotNil(t, stateProto.LatestExecutionPayloadBid)
require.Equal(t, primitives.BuilderIndex(1), stateProto.LatestExecutionPayloadBid.BuilderIndex)
require.Equal(t, primitives.Gwei(500_000), stateProto.LatestExecutionPayloadBid.Value)
}
func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
slot := primitives.Slot(4)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0x01}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0x02}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
// Make builder inactive by setting withdrawable_epoch.
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
stateProto.Builders[int(builderIdx)].WithdrawableEpoch = 0
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
require.NoError(t, err)
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "is not active", err)
}
func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
slot := primitives.Slot(5)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0x0A}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0x0B}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+10, randao, latestHash, pubKey)
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
	// Add pending withdrawals and payments so the builder's remaining balance cannot cover the bid.
stateProto.BuilderPendingWithdrawals = []*ethpb.BuilderPendingWithdrawal{
{Amount: 15, BuilderIndex: builderIdx},
}
stateProto.BuilderPendingPayments = []*ethpb.BuilderPendingPayment{
{Withdrawal: &ethpb.BuilderPendingWithdrawal{Amount: 20, BuilderIndex: builderIdx}},
}
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
require.NoError(t, err)
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "cannot cover bid amount", err)
}
func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
slot := primitives.Slot(6)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: invalidSig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "bid slot", err)
}
func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
slot := primitives.Slot(11)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "parent block hash mismatch", err)
}
func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
slot := primitives.Slot(12)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bytes.Repeat([]byte{0x99}, 32)), // mismatch
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "parent block root mismatch", err)
}
func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
slot := primitives.Slot(13)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "prev randao mismatch", err)
}

View File

@@ -1,76 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
return errors.Wrap(err, "could not compute builder payment quorum threshold")
}
payments, err := state.BuilderPendingPayments()
if err != nil {
return errors.Wrap(err, "could not get builder pending payments")
}
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
var withdrawals []*ethpb.BuilderPendingWithdrawal
for _, payment := range payments[:slotsPerEpoch] {
if quorum > payment.Weight {
continue
}
withdrawals = append(withdrawals, payment.Withdrawal)
}
if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
return errors.Wrap(err, "could not append builder pending withdrawals")
}
if err := state.RotateBuilderPendingPayments(); err != nil {
return errors.Wrap(err, "could not rotate builder pending payments")
}
return nil
}
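// A compact sketch of the rotation step from the pseudocode above, assuming
// SLOTS_PER_EPOCH = 4 for brevity (illustrative only, not the real constant):
// the current-epoch half moves to the front and a fresh zeroed half is appended.
func rotatePaymentsSketch(payments []*ethpb.BuilderPendingPayment) []*ethpb.BuilderPendingPayment {
	const slotsPerEpoch = 4 // assumed for illustration
	rotated := append([]*ethpb.BuilderPendingPayment{}, payments[slotsPerEpoch:]...)
	for range slotsPerEpoch {
		rotated = append(rotated, &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{},
		})
	}
	return rotated // length stays 2 * slotsPerEpoch
}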
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return 0, errors.Wrap(err, "could not get total active balance")
}
cfg := params.BeaconConfig()
slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
numerator := cfg.BuilderPaymentThresholdNumerator
denominator := cfg.BuilderPaymentThresholdDenominator
activeBalancePerSlot := activeBalance / slotsPerEpoch
quorum := (activeBalancePerSlot * numerator) / denominator
return primitives.Gwei(quorum), nil
}
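// To make the threshold arithmetic concrete, a worked example with assumed
// numbers (the real numerator, denominator, and balances come from the config):
// 1,024,000 Gwei of active balance over 32 slots gives 32,000 Gwei per slot,
// and an assumed 8/16 ratio yields a 16,000 Gwei quorum. A pending payment
// qualifies for withdrawal only once its accumulated weight reaches this value.
func quorumSketch() primitives.Gwei {
	totalActiveBalance := uint64(1_024_000)         // Gwei, assumed
	slotsPerEpoch := uint64(32)                     // assumed
	numerator, denominator := uint64(8), uint64(16) // assumed threshold ratio
	perSlot := totalActiveBalance / slotsPerEpoch   // 32,000 Gwei per slot
	return primitives.Gwei((perSlot * numerator) / denominator) // 16,000 Gwei
}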

View File

@@ -1,119 +0,0 @@
package gloas
import (
"slices"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestBuilderQuorumThreshold(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()
validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
require.NoError(t, err)
got, err := builderQuorumThreshold(st)
require.NoError(t, err)
total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
require.Equal(t, primitives.Gwei(want), got)
}
func TestProcessBuilderPendingPayments(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()
buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
for i := range p {
p[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
for i, w := range weights {
p[i].Weight = w
p[i].Withdrawal.Amount = 1
}
return p
}
validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
slotsPerEpoch := int(cfg.SlotsPerEpoch)
t.Run("append qualifying withdrawals", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
st := &testProcessState{BeaconState: pbSt, payments: payments}
require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 2, len(st.withdrawals))
require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])
require.Equal(t, 2*slotsPerEpoch, len(st.payments))
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
}
})
t.Run("no withdrawals when below quorum", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum - 1))
st := &testProcessState{BeaconState: pbSt, payments: payments}
require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 0, len(st.withdrawals))
})
}
type testProcessState struct {
state.BeaconState
payments []*ethpb.BuilderPendingPayment
withdrawals []*ethpb.BuilderPendingWithdrawal
}
func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
return t.payments, nil
}
func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
t.withdrawals = append(t.withdrawals, withdrawals...)
return nil
}
func (t *testProcessState) RotateBuilderPendingPayments() error {
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
rotated := slices.Clone(t.payments[slotsPerEpoch:])
for range slotsPerEpoch {
rotated = append(rotated, &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
})
}
t.payments = rotated
return nil
}

View File

@@ -1,43 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
var paymentIndex primitives.Slot
if proposalEpoch == currentEpoch {
paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
} else if proposalEpoch+1 == currentEpoch {
paymentIndex = header.Slot % slotsPerEpoch
} else {
return nil
}
if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
return errors.Wrap(err, "could not clear builder pending payment")
}
return nil
}
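// A minimal sketch of the index selection above, under an assumed
// SLOTS_PER_EPOCH of 32: the buffer holds two epochs of payments, the
// previous epoch in the first half and the current epoch in the second
// (illustrative only; the real code goes through slots.ToEpoch and
// time.CurrentEpoch).
func paymentIndexSketch(headerSlot, stateSlot primitives.Slot) int {
	const slotsPerEpoch = 32 // assumed for illustration
	headerEpoch := headerSlot / slotsPerEpoch
	stateEpoch := stateSlot / slotsPerEpoch
	switch {
	case headerEpoch == stateEpoch:
		return int(slotsPerEpoch + headerSlot%slotsPerEpoch) // second half
	case headerEpoch+1 == stateEpoch:
		return int(headerSlot % slotsPerEpoch) // first half
	default:
		return -1 // older than the previous epoch: nothing to clear
	}
}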

View File

@@ -1,112 +0,0 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch * 2
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)
setPendingPayment(t, st, paymentIndex, 123)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}
func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch + 7
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)
setPendingPayment(t, st, paymentIndex, 456)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}
func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
headerSlot := slotsPerEpoch * 2 // two epochs behind
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)
original := getPendingPayment(t, st, paymentIndex)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
after := getPendingPayment(t, st, paymentIndex)
require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}
func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
t.Helper()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentCount := int(slotsPerEpoch * 2)
payments := make([]*eth.BuilderPendingPayment, paymentCount)
for i := range payments {
payments[i] = &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
Amount: 1,
},
}
}
st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
Slot: slot,
BuilderPendingPayments: payments,
})
require.NoError(t, err)
return st
}
func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
t.Helper()
payment := &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
Amount: primitives.Gwei(amount),
},
}
require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}
func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
t.Helper()
stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)
return stateProto.BuilderPendingPayments[index]
}

View File

@@ -3,8 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"electra.go",
"errors.go",
"log.go",
"skip_slot_cache.go",
"state.go",
@@ -64,14 +62,11 @@ go_test(
"altair_transition_no_verify_sig_test.go",
"bellatrix_transition_no_verify_sig_test.go",
"benchmarks_test.go",
"electra_test.go",
"exports_test.go",
"skip_slot_cache_test.go",
"state_fuzz_test.go",
"state_test.go",
"trailing_slot_state_cache_test.go",
"transition_fuzz_test.go",
"transition_gloas_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
],
@@ -107,7 +102,6 @@ go_test(
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,216 +0,0 @@
package transition_test
import (
"context"
"errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestProcessOperationsWithNilRequests(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
errMsg string
}{
{
name: "Nil deposit request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
},
errMsg: "nil deposit request",
},
{
name: "Nil withdrawal request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
},
errMsg: "nil withdrawal request",
},
{
name: "Nil consolidation request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
},
errMsg: "nil consolidation request",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
_, err = transition.ElectraOperations(t.Context(), st, b.Block())
require.ErrorContains(t, tc.errMsg, err)
})
}
}
func TestElectraOperations_ProcessingErrors(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
errCheck func(t *testing.T, err error)
}{
{
name: "ErrProcessProposerSlashingsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create invalid proposer slashing with out-of-bounds proposer index
blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 999999, // Invalid index (out of bounds)
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 999999,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process proposer slashings failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
},
},
{
name: "ErrProcessAttestationsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create attestation with invalid committee index
blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
{
AggregationBits: []byte{0b00000001},
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 999999, // Invalid committee index
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
},
CommitteeBits: []byte{0b00000001},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process attestations failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
},
},
{
name: "ErrProcessDepositsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create deposit with invalid proof length
blk.Block.Body.Deposits = []*ethpb.Deposit{
{
Proof: [][]byte{}, // Invalid: empty proof
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 32000000000, // 32 ETH in Gwei
Signature: make([]byte, 96),
},
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process deposits failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
},
},
{
name: "ErrProcessVoluntaryExitsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create voluntary exit with invalid validator index
blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 999999, // Invalid index (out of bounds)
},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process voluntary exits failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
},
},
{
name: "ErrProcessBLSChangesFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create BLS to execution change with invalid validator index
blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
{
Message: &ethpb.BLSToExecutionChange{
ValidatorIndex: 999999, // Invalid index (out of bounds)
FromBlsPubkey: make([]byte, 48),
ToExecutionAddress: make([]byte, 20),
},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process BLS to execution changes failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(primitives.Slot(1)))
_, err = transition.ElectraOperations(ctx, st, b.Block())
require.NotNil(t, err, "Expected an error but got nil")
tc.errCheck(t, err)
})
}
}

View File

@@ -1,19 +0,0 @@
package transition
import "errors"
var (
ErrAttestationsSignatureInvalid = errors.New("attestations signature invalid")
ErrRandaoSignatureInvalid = errors.New("randao signature invalid")
ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
ErrProcessWithdrawalsFailed = errors.New("process withdrawals failed")
ErrProcessRandaoFailed = errors.New("process randao failed")
ErrProcessEth1DataFailed = errors.New("process eth1 data failed")
ErrProcessProposerSlashingsFailed = errors.New("process proposer slashings failed")
ErrProcessAttesterSlashingsFailed = errors.New("process attester slashings failed")
ErrProcessAttestationsFailed = errors.New("process attestations failed")
ErrProcessDepositsFailed = errors.New("process deposits failed")
ErrProcessVoluntaryExitsFailed = errors.New("process voluntary exits failed")
ErrProcessBLSChangesFailed = errors.New("process BLS to execution changes failed")
ErrProcessSyncAggregateFailed = errors.New("process sync aggregate failed")
)
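// These sentinels are wrapped rather than replaced, so callers can still match
// them after wrapping. A minimal usage sketch (ctx, st, and blk are assumed to
// be in scope, and ElectraOperations is the test-only export shown below):
//
//	if _, err := transition.ElectraOperations(ctx, st, blk); err != nil {
//		if errors.Is(err, transition.ErrProcessDepositsFailed) {
//			// Deposits were malformed; surface a deposit-specific failure.
//		}
//	}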

View File

@@ -1,3 +0,0 @@
package transition
var ElectraOperations = electraOperations

View File

@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
); err != nil {
return nil, err
}
// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
return nil, err
}
}
return state, nil
}
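// For reference, a minimal sketch of the bit update this call is expected to
// perform, assuming the little-endian bit layout (byte index/8, mask
// 1<<(index%8)) that the tests below assert (illustrative only):
func clearAvailabilityBitSketch(bits []byte, index uint64) error {
	byteIdx := index / 8
	if byteIdx >= uint64(len(bits)) {
		return fmt.Errorf("bit index %d (byte index %d) out of range for length %d", index, byteIdx, len(bits))
	}
	bits[byteIdx] &^= 1 << (index % 8) // unset the availability bit for the next slot
	return nil
}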

View File

@@ -1,141 +0,0 @@
package transition
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/stretchr/testify/require"
)
func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
slot := primitives.Slot(10)
cfg := params.BeaconConfig()
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := nextIdx / 8
bitMask := byte(1 << (nextIdx % 8))
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)
post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
}
func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
cfg := params.BeaconConfig()
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)
post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
}
func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
slot := primitives.Slot(7)
availability := make([]byte, 1)
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
cfg := params.BeaconConfig()
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := idx / 8
require.EqualError(t, err, fmt.Sprintf(
"bit index %d (byte index %d) out of range for execution payload availability length %d",
idx, byteIdx, len(availability),
))
}
func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
protoState := &ethpb.BeaconStateGloas{
Slot: slot,
LatestBlockHeader: testBeaconBlockHeader(),
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
PreviousEpochParticipation: []byte{},
CurrentEpochParticipation: []byte{},
JustificationBits: []byte{0},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentSyncCommittee: &ethpb.SyncCommittee{},
NextSyncCommittee: &ethpb.SyncCommittee{},
}
for i := range protoState.BlockRoots {
protoState.BlockRoots[i] = make([]byte, 32)
}
for i := range protoState.StateRoots {
protoState.StateRoots[i] = make([]byte, 32)
}
for i := range protoState.RandaoMixes {
protoState.RandaoMixes[i] = make([]byte, 32)
}
for i := range protoState.BuilderPendingPayments {
protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
for i := range pubkeys {
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}
protoState.NextSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}
st, err := state_native.InitializeFromProtoGloas(protoState)
require.NoError(t, err)
require.Equal(t, version.Gloas, st.Version())
return st
}
func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
return &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}
}

View File

@@ -7,11 +7,12 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -69,11 +70,10 @@ func ExecuteStateTransitionNoVerifyAnySig(
}
// Execute per block transition.
sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
if err != nil {
return nil, nil, errors.Wrap(err, "could not process block")
}
set := sigSlice.Batch()
// State root validation.
postStateRoot, err := st.HashTreeRoot(ctx)
@@ -113,7 +113,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
// assert block.state_root == hash_tree_root(state)
func CalculateStateRoot(
ctx context.Context,
rollback state.BeaconState,
state state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
@@ -122,7 +122,7 @@ func CalculateStateRoot(
tracing.AnnotateError(span, ctx.Err())
return [32]byte{}, ctx.Err()
}
if rollback == nil || rollback.IsNil() {
if state == nil || state.IsNil() {
return [32]byte{}, errors.New("nil state")
}
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
@@ -130,7 +130,7 @@ func CalculateStateRoot(
}
// Copy state to avoid mutating the state reference.
state := rollback.Copy()
state = state.Copy()
// Execute per slots transition.
var err error
@@ -141,103 +141,14 @@ func CalculateStateRoot(
}
// Execute per block transition.
if features.Get().EnableProposerPreprocessing {
state, err = processBlockForProposing(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
}
} else {
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
return state.HashTreeRoot(ctx)
}
// processBlockForProposing processes the block and verifies the signatures within it. The block proposer signature is not verified, as the block is not yet signed.
func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
var err error
var set BlockSignatureBatches
set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
if err != nil {
return nil, err
}
// We first try to verify all signatures batched optimistically. The block proposer signature is ignored.
sigSet := set.Batch()
valid, err := sigSet.Verify()
if err != nil || valid {
return st, err
}
// At least one signature failed to verify.
// Verify attestation signatures.
attSigs := set.AttestationSignatures
if attSigs == nil {
return nil, ErrAttestationsSignatureInvalid
}
valid, err = attSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrAttestationsSignatureInvalid
}
// Verify Randao signature
randaoSigs := set.RandaoSignatures
if randaoSigs == nil {
return nil, ErrRandaoSignatureInvalid
}
valid, err = randaoSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrRandaoSignatureInvalid
}
if signed.Block().Version() < version.Capella {
// This should not happen, as one of the above signature checks must have failed.
return st, nil
}
// Verify BLS to execution changes signatures
blsChangeSigs := set.BLSChangeSignatures
if blsChangeSigs == nil {
return nil, ErrBLSToExecutionChangesSignatureInvalid
}
valid, err = blsChangeSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrBLSToExecutionChangesSignatureInvalid
}
// We should not reach this point, as one of the above signatures must have failed.
return st, nil
}
// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
type BlockSignatureBatches struct {
RandaoSignatures *bls.SignatureBatch
AttestationSignatures *bls.SignatureBatch
BLSChangeSignatures *bls.SignatureBatch
}
// Batch merges all signature batches in the BlockSignatureBatches into a single batch.
func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
sigs := bls.NewSet()
if b.RandaoSignatures != nil {
sigs.Join(b.RandaoSignatures)
}
if b.AttestationSignatures != nil {
sigs.Join(b.AttestationSignatures)
}
if b.BLSChangeSignatures != nil {
sigs.Join(b.BLSChangeSignatures)
}
return sigs
}
// ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation
// transformations as defined in the Ethereum Serenity specification. It does not validate
// any block signature except for deposit and slashing signatures. It also returns the relevant
@@ -254,48 +165,48 @@ func ProcessBlockNoVerifyAnySig(
ctx context.Context,
st state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) (BlockSignatureBatches, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
defer span.End()
set := BlockSignatureBatches{}
if err := blocks.BeaconBlockIsNil(signed); err != nil {
return set, nil, err
return nil, nil, err
}
if st.Version() != signed.Block().Version() {
return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
}
blk := signed.Block()
st, err := ProcessBlockForStateRoot(ctx, st, signed)
if err != nil {
return set, nil, err
return nil, nil, err
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
tracing.AnnotateError(span, err)
return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
}
set.RandaoSignatures = rSet
aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
if err != nil {
return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
}
set.AttestationSignatures = aSet
// Merge the randao and attestation signatures into a single set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
cSet, err := b.BLSChangesSignatureBatch(st, changes)
if err != nil {
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
}
set.BLSChangeSignatures = cSet
set.Join(cSet)
}
return set, st, nil
}
@@ -357,7 +268,7 @@ func ProcessOperationsNoVerifyAttsSigs(
return nil, err
}
} else {
state, err = electraOperations(ctx, state, beaconBlock)
state, err = electra.ProcessOperations(ctx, state, beaconBlock)
if err != nil {
return nil, err
}
@@ -415,7 +326,7 @@ func ProcessBlockForStateRoot(
if state.Version() >= version.Capella {
state, err = b.ProcessWithdrawals(state, executionData)
if err != nil {
return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err = b.ProcessPayload(state, blk.Body()); err != nil {
@@ -427,13 +338,13 @@ func ProcessBlockForStateRoot(
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
return nil, errors.Wrap(err, "could not verify and process randao")
}
state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
return nil, errors.Wrap(err, "could not process eth1 data")
}
state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
@@ -452,7 +363,7 @@ func ProcessBlockForStateRoot(
}
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
return state, nil
@@ -468,35 +379,31 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
exitInfo := &validators.ExitInfo{}
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attestation")
}
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
}
return st, nil
return b.ProcessBLSToExecutionChanges(st, beaconBlock)
}
// This calls phase 0 block operations.
@@ -504,32 +411,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
var exitInfo *validators.ExitInfo
var exitInfo *v.ExitInfo
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block attester slashings")
}
st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block attestations")
}
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
return nil, errors.Wrap(err, "could not process deposits")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return st, nil
}

View File

@@ -132,8 +132,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
// Test Signature set verifies.
sigSet := set.Batch()
verified, err := sigSet.Verify()
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Could not verify signature set.")
}
@@ -146,8 +145,7 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
require.NoError(t, err)
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
sigSet := set.Batch()
verified, err := sigSet.Verify()
verified, err := set.Verify()
require.NoError(t, err)
require.Equal(t, true, verified, "Could not verify signature set")
}
@@ -156,9 +154,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
set := signatures.Batch()
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])

View File

@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
if err != nil {
return nil, errors.Wrap(err, "data column sidecars file path open")
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
}
}()
// Read file metadata.
metadata, err := dcs.metadata(file)

View File

@@ -89,7 +89,6 @@ type NoHeadAccessDatabase interface {
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
@@ -97,7 +96,6 @@ type NoHeadAccessDatabase interface {
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
SlotInDiffTree(primitives.Slot) (uint64, int, error)
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error

View File

@@ -32,7 +32,6 @@ go_library(
"state_diff_helpers.go",
"state_summary.go",
"state_summary_cache.go",
"testing_helpers.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",

View File

@@ -1053,10 +1053,6 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
return nil, err
}
if uint64(slot) < s.getOffset() {
return nil, ErrSlotBeforeOffset
}
st, err := s.stateByDiff(ctx, slot)
if err != nil {
return nil, err
@@ -1074,10 +1070,6 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
return false, err
}
if uint64(slot) < s.getOffset() {
return false, ErrSlotBeforeOffset
}
stateLvl := computeLevel(s.getOffset(), slot)
return stateLvl != -1, nil
}

View File

@@ -23,16 +23,6 @@ const (
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
*/
// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
// If it is, it also returns the offset and level in the tree.
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
offset := s.getOffset()
if uint64(slot) < offset {
return 0, -1, ErrSlotBeforeOffset
}
return offset, computeLevel(offset, slot), nil
}
// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
@@ -43,10 +33,13 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
}
slot := st.Slot()
offset, lvl, err := s.SlotInDiffTree(slot)
if err != nil {
return errors.Wrap(err, "could not determine if slot is in diff tree")
offset := s.getOffset()
if uint64(slot) < offset {
return ErrSlotBeforeOffset
}
// Find the level to save the state.
lvl := computeLevel(offset, slot)
if lvl == -1 {
return nil
}
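With `SlotInDiffTree` gone, the offset check and `computeLevel` are inlined at the call site. For orientation, here is a runnable sketch of what `computeLevel` appears to compute, judging from the file comment above and the tests below; the exponent values are hypothetical, not Prysm defaults:

```go
package main

import "fmt"

// computeLevelSketch mirrors what computeLevel appears to do (an assumption,
// not the exact Prysm implementation): return the first level whose saving
// interval 2**exp divides the slot's distance from the offset, or -1 when the
// slot is not a saving point, including any slot before the offset.
func computeLevelSketch(offset, slot uint64, exponents []uint64) int {
	if slot < offset {
		return -1
	}
	rel := slot - offset
	for lvl, exp := range exponents {
		if rel%(1<<exp) == 0 {
			return lvl
		}
	}
	return -1
}

func main() {
	exps := []uint64{21, 16, 12} // hypothetical StateDiffExponents
	fmt.Println(computeLevelSketch(0, 1<<21, exps)) // 0: full snapshot boundary
	fmt.Println(computeLevelSketch(0, 1<<16, exps)) // 1: diff on top of level 0
	fmt.Println(computeLevelSketch(10, 9, exps))    // -1: slot before offset
}
```

Scanning from level 0 first means a slot sitting on several boundaries lands at the coarsest level, which matches the test below expecting slot 2**21 to map to level 0.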

View File

@@ -25,7 +25,7 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
return bbolt.ErrBucketNotFound
}
offsetBytes := bucket.Get(offsetKey)
offsetBytes := bucket.Get([]byte("offset"))
if offsetBytes == nil {
return errors.New("state diff cache: offset not found")
}

View File

@@ -19,7 +19,7 @@ import (
var (
offsetKey = []byte("offset")
ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
ErrSlotBeforeOffset = errors.New("slot is before root offset")
)
func makeKeyForStateDiffTree(level int, slot uint64) []byte {
@@ -73,9 +73,6 @@ func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (an
// computeLevel computes the level in the diff tree. Returns -1 if the slot is not a saving point in the tree.
func computeLevel(offset uint64, slot primitives.Slot) int {
if uint64(slot) < offset {
return -1
}
rel := uint64(slot) - offset
for i, exp := range flags.Get().StateDiffExponents {
if exp < 2 || exp >= 64 {

View File

@@ -43,12 +43,8 @@ func TestStateDiff_ComputeLevel(t *testing.T) {
offset := db.getOffset()
// should be -1. slot < offset
lvl := computeLevel(10, primitives.Slot(9))
require.Equal(t, -1, lvl)
// 2 ** 21
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
require.Equal(t, 0, lvl)
// 2 ** 21 * 3

View File

@@ -1395,23 +1395,6 @@ func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
require.IsNil(t, readSt)
})
t.Run("slot before offset", func(t *testing.T) {
db := setupDB(t)
setDefaultStateDiffExponents()
err := setOffsetInDB(db, 10)
require.NoError(t, err)
r := bytesutil.ToBytes32([]byte{'A'})
ss := &ethpb.StateSummary{Slot: 9, Root: r[:]}
err = db.SaveStateSummary(t.Context(), ss)
require.NoError(t, err)
st, err := db.getStateUsingStateDiff(t.Context(), r)
require.ErrorIs(t, err, ErrSlotBeforeOffset)
require.IsNil(t, st)
})
t.Run("Full state snapshot", func(t *testing.T) {
t.Run("using state summary", func(t *testing.T) {
for v := range version.All() {
@@ -1644,21 +1627,4 @@ func TestStore_HasStateUsingStateDiff(t *testing.T) {
}
})
t.Run("slot before offset", func(t *testing.T) {
db := setupDB(t)
setDefaultStateDiffExponents()
err := setOffsetInDB(db, 10)
require.NoError(t, err)
r := bytesutil.ToBytes32([]byte{'B'})
ss := &ethpb.StateSummary{Slot: 0, Root: r[:]}
err = db.SaveStateSummary(t.Context(), ss)
require.NoError(t, err)
hasState, err := db.hasStateUsingStateDiff(t.Context(), r)
require.ErrorIs(t, err, ErrSlotBeforeOffset)
require.Equal(t, false, hasState)
})
}

View File

@@ -1,37 +0,0 @@
package kv
import (
"encoding/binary"
"testing"
"go.etcd.io/bbolt"
)
// InitStateDiffCacheForTesting initializes the state diff cache with the given offset.
// This is intended for testing purposes when setting up state diff after database creation.
// This file is only compiled when the "testing" build tag is set.
func (s *Store) InitStateDiffCacheForTesting(t testing.TB, offset uint64) error {
// First, set the offset in the database.
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, offset)
return bucket.Put([]byte("offset"), offsetBytes)
})
if err != nil {
return err
}
// Then create the state diff cache.
sdCache, err := newStateDiffCache(s)
if err != nil {
return err
}
s.stateDiffCache = sdCache
return nil
}
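The deleted helper's comment says the file is compiled only under the "testing" build tag. For reference, a minimal sketch of that gating mechanism (contents illustrative, not taken from the repo):

```go
//go:build testing

package kv

// Anything in this file is included only when the "testing" build tag is
// passed (e.g. `go test -tags testing ./...`), keeping test-only helpers
// out of release binaries.
```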

View File

@@ -49,7 +49,6 @@ func (s *Service) prepareForkChoiceAtts() {
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
ticker.Done()
return
}
}

View File

@@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",

View File

@@ -9,7 +9,6 @@ import (
"strings"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
@@ -182,16 +181,6 @@ func prepareConfigSpec() (map[string]any, error) {
data[tag] = convertValueForJSON(val, tag)
}
// Add Fulu preset values. These are compile-time constants from fieldparams,
// not runtime configs, but are required by the /eth/v1/config/spec API.
data["NUMBER_OF_COLUMNS"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "NUMBER_OF_COLUMNS")
data["CELLS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "CELLS_PER_EXT_BLOB")
data["FIELD_ELEMENTS_PER_CELL"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.CellsPerBlob)), "FIELD_ELEMENTS_PER_CELL")
data["FIELD_ELEMENTS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(config.FieldElementsPerBlob*2), "FIELD_ELEMENTS_PER_EXT_BLOB")
data["KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH"] = convertValueForJSON(reflect.ValueOf(uint64(4)), "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH")
// UPDATE_TIMEOUT is derived from SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
data["UPDATE_TIMEOUT"] = convertValueForJSON(reflect.ValueOf(uint64(config.SlotsPerEpoch)*uint64(config.EpochsPerSyncCommitteePeriod)), "UPDATE_TIMEOUT")
return data, nil
}
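Most of the removed entries were compile-time presets, but `UPDATE_TIMEOUT` was derived at runtime. A tiny sketch of that derivation, using the fixture values asserted in the test below (27 and 66) rather than mainnet constants:

```go
package main

import "fmt"

func main() {
	// UPDATE_TIMEOUT = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD,
	// exactly as the removed line above computes it.
	slotsPerEpoch := uint64(27)
	epochsPerSyncCommitteePeriod := uint64(66)
	fmt.Println(slotsPerEpoch * epochsPerSyncCommitteePeriod) // 1782
}
```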

View File

@@ -132,7 +132,7 @@ func TestGetSpec(t *testing.T) {
config.MinSyncCommitteeParticipants = 71
config.ProposerReorgCutoffBPS = primitives.BP(121)
config.AttestationDueBPS = primitives.BP(122)
config.AggregateDueBPS = primitives.BP(123)
config.AggregrateDueBPS = primitives.BP(123)
config.ContributionDueBPS = primitives.BP(124)
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
@@ -168,10 +168,6 @@ func TestGetSpec(t *testing.T) {
config.BlobsidecarSubnetCount = 101
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103
config.BuilderWithdrawalPrefixByte = byte('b')
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
config.BuilderPaymentThresholdNumerator = 104
config.BuilderPaymentThresholdDenominator = 105
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -194,9 +190,6 @@ func TestGetSpec(t *testing.T) {
var daap [4]byte
copy(daap[:], []byte{'0', '0', '0', '7'})
config.DomainAggregateAndProof = daap
var dbb [4]byte
copy(dbb[:], []byte{'0', '0', '0', '8'})
config.DomainBeaconBuilder = dbb
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
@@ -212,7 +205,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 186, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -426,14 +419,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "DOMAIN_BEACON_BUILDER":
assert.Equal(t, "0x30303038", v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x00000000", v)
case "BUILDER_WITHDRAWAL_PREFIX":
assert.Equal(t, "0x62", v)
case "BUILDER_INDEX_SELF_BUILD":
assert.Equal(t, "125", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
@@ -470,7 +457,7 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "121", v)
case "ATTESTATION_DUE_BPS":
assert.Equal(t, "122", v)
case "AGGREGATE_DUE_BPS":
case "AGGREGRATE_DUE_BPS":
assert.Equal(t, "123", v)
case "CONTRIBUTION_DUE_BPS":
assert.Equal(t, "124", v)
@@ -590,26 +577,10 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
assert.Equal(t, "104", v)
case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
assert.Equal(t, "105", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)
assert.Equal(t, 2, len(blobSchedule))
case "FIELD_ELEMENTS_PER_CELL":
assert.Equal(t, "64", v) // From fieldparams.CellsPerBlob
case "FIELD_ELEMENTS_PER_EXT_BLOB":
assert.Equal(t, "198", v) // FieldElementsPerBlob (99) * 2
case "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "4", v) // Preset value
case "CELLS_PER_EXT_BLOB":
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
case "NUMBER_OF_COLUMNS":
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
case "UPDATE_TIMEOUT":
assert.Equal(t, "1782", v) // SlotsPerEpoch (27) * EpochsPerSyncCommitteePeriod (66)
default:
t.Errorf("Incorrect key: %s", k)
}

View File

@@ -602,7 +602,7 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
beaconState, err := vs.StateGen.StateByRoot(ctx, block.Block().ParentRoot())
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
@@ -613,72 +613,13 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedB
block,
)
if err != nil {
return vs.handleStateRootError(ctx, block, err)
return nil, errors.Wrapf(err, "could not calculate state root at slot %d", beaconState.Slot())
}
log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
return root[:], nil
}
type computeStateRootAttemptsKeyType string
const computeStateRootAttemptsKey = computeStateRootAttemptsKeyType("compute-state-root-attempts")
const maxComputeStateRootAttempts = 3
// handleStateRootError retries block construction in some error cases.
func (vs *Server) handleStateRootError(ctx context.Context, block interfaces.SignedBeaconBlock, err error) ([]byte, error) {
if ctx.Err() != nil {
return nil, status.Errorf(codes.Canceled, "context error: %v", ctx.Err())
}
switch {
case errors.Is(err, transition.ErrAttestationsSignatureInvalid),
errors.Is(err, transition.ErrProcessAttestationsFailed):
log.WithError(err).Warn("Retrying block construction without attestations")
if err := block.SetAttestations([]ethpb.Att{}); err != nil {
return nil, errors.Wrap(err, "could not set attestations")
}
case errors.Is(err, transition.ErrProcessBLSChangesFailed), errors.Is(err, transition.ErrBLSToExecutionChangesSignatureInvalid):
log.WithError(err).Warn("Retrying block construction without BLS to execution changes")
if err := block.SetBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{}); err != nil {
return nil, errors.Wrap(err, "could not set BLS to execution changes")
}
case errors.Is(err, transition.ErrProcessProposerSlashingsFailed):
log.WithError(err).Warn("Retrying block construction without proposer slashings")
block.SetProposerSlashings([]*ethpb.ProposerSlashing{})
case errors.Is(err, transition.ErrProcessAttesterSlashingsFailed):
log.WithError(err).Warn("Retrying block construction without attester slashings")
if err := block.SetAttesterSlashings([]ethpb.AttSlashing{}); err != nil {
return nil, errors.Wrap(err, "could not set attester slashings")
}
case errors.Is(err, transition.ErrProcessVoluntaryExitsFailed):
log.WithError(err).Warn("Retrying block construction without voluntary exits")
block.SetVoluntaryExits([]*ethpb.SignedVoluntaryExit{})
case errors.Is(err, transition.ErrProcessSyncAggregateFailed):
log.WithError(err).Warn("Retrying block construction without sync aggregate")
emptySig := [96]byte{0xC0}
emptyAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
SyncCommitteeSignature: emptySig[:],
}
if err := block.SetSyncAggregate(emptyAggregate); err != nil {
log.WithError(err).Error("Could not set sync aggregate")
}
default:
return nil, errors.Wrap(err, "could not compute state root")
}
// prevent deep recursion by limiting max attempts.
if v, ok := ctx.Value(computeStateRootAttemptsKey).(int); !ok {
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, int(1))
} else if v >= maxComputeStateRootAttempts {
return nil, fmt.Errorf("attempted max compute state root attempts %d", maxComputeStateRootAttempts)
} else {
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, v+1)
}
// recursive call to compute state root again
return vs.computeStateRoot(ctx, block)
}
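The removed `handleStateRootError` bounded its recursion by threading an attempt counter through the context. A generic, self-contained sketch of that pattern; the names here are illustrative, not Prysm APIs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type attemptsKey struct{}

const maxAttempts = 3

// retry re-invokes do until it succeeds, carrying the attempt count in the
// context so recursive calls share a single bounded counter.
func retry(ctx context.Context, do func(context.Context) error) error {
	n, _ := ctx.Value(attemptsKey{}).(int)
	if n >= maxAttempts {
		return fmt.Errorf("gave up after %d attempts", maxAttempts)
	}
	if err := do(ctx); err != nil {
		return retry(context.WithValue(ctx, attemptsKey{}, n+1), do)
	}
	return nil
}

func main() {
	err := retry(context.Background(), func(context.Context) error {
		return errors.New("always fails")
	})
	fmt.Println(err) // gave up after 3 attempts
}
```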
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
//
// SubmitValidatorRegistrations submits validator registrations.

View File

@@ -1313,59 +1313,6 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
require.NoError(t, err)
}
func TestHandleStateRootError_MaxAttemptsReached(t *testing.T) {
// Test that handleStateRootError returns an error when max attempts is reached
// instead of recursing infinitely.
ctx := t.Context()
vs := &Server{}
// Create a minimal block for testing
blk := util.NewBeaconBlock()
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Pre-seed the context with max attempts already reached
ctx = context.WithValue(ctx, computeStateRootAttemptsKey, maxComputeStateRootAttempts)
// Call handleStateRootError with a retryable error
_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
// Should return an error about max attempts instead of recursing
require.ErrorContains(t, "attempted max compute state root attempts", err)
}
func TestHandleStateRootError_IncrementsAttempts(t *testing.T) {
// Test that handleStateRootError properly increments the attempts counter
// and eventually fails after max attempts.
db := dbutil.SetupDB(t)
ctx := t.Context()
beaconState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100)
stateGen := stategen.New(db, doublylinkedtree.New())
vs := &Server{
StateGen: stateGen,
}
// Create a block that will trigger retries
blk := util.NewBeaconBlock()
blk.Block.ParentRoot = parentRoot[:]
blk.Block.Slot = 1
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Add a state for the parent root so StateByRoot succeeds
require.NoError(t, stateGen.SaveState(ctx, parentRoot, beaconState))
// Call handleStateRootError with a retryable error - it will recurse
// but eventually hit the max attempts limit since CalculateStateRoot
// will keep failing (no valid attestations, randao, etc.)
_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
// Should eventually fail - either with max attempts or another error
require.NotNil(t, err)
}
func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) {
ctx := t.Context()

View File

@@ -5,14 +5,12 @@ go_library(
srcs = [
"error.go",
"interfaces.go",
"interfaces_gloas.go",
"prometheus.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/state",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/go-bitfield"
customtypes "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/custom-types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -44,8 +43,6 @@ type Prover interface {
FinalizedRootProof(ctx context.Context) ([][]byte, error)
CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error)
NextSyncCommitteeProof(ctx context.Context) ([][]byte, error)
ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error)
}
// ReadOnlyBeaconState defines a struct which only has read access to beacon state methods.
@@ -66,7 +63,6 @@ type ReadOnlyBeaconState interface {
ReadOnlyDeposits
ReadOnlyConsolidations
ReadOnlyProposerLookahead
readOnlyGloasFields
ToProtoUnsafe() any
ToProto() any
GenesisTime() time.Time
@@ -102,7 +98,6 @@ type WriteOnlyBeaconState interface {
WriteOnlyWithdrawals
WriteOnlyDeposits
WriteOnlyProposerLookahead
writeOnlyGloasFields
SetGenesisTime(val time.Time) error
SetGenesisValidatorsRoot(val []byte) error
SetSlot(val primitives.Slot) error

View File

@@ -1,24 +0,0 @@
package state
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
type writeOnlyGloasFields interface {
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
ClearBuilderPendingPayment(index primitives.Slot) error
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
}
type readOnlyGloasFields interface {
BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
LatestBlockHash() ([32]byte, error)
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
}

View File

@@ -14,7 +14,6 @@ go_library(
"getters_deposits.go",
"getters_eth1.go",
"getters_exit.go",
"getters_gloas.go",
"getters_misc.go",
"getters_participation.go",
"getters_payload_header.go",
@@ -37,7 +36,6 @@ go_library(
"setters_deposit_requests.go",
"setters_deposits.go",
"setters_eth1.go",
"setters_gloas.go",
"setters_misc.go",
"setters_participation.go",
"setters_payload_header.go",
@@ -99,7 +97,6 @@ go_test(
"getters_deposit_requests_test.go",
"getters_deposits_test.go",
"getters_exit_test.go",
"getters_gloas_test.go",
"getters_participation_test.go",
"getters_setters_lookahead_test.go",
"getters_test.go",
@@ -117,7 +114,6 @@ go_test(
"setters_deposit_requests_test.go",
"setters_deposits_test.go",
"setters_eth1_test.go",
"setters_gloas_test.go",
"setters_misc_test.go",
"setters_participation_test.go",
"setters_payload_header_test.go",

View File

@@ -1,149 +0,0 @@
package state_native
import (
"fmt"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
)
// LatestBlockHash returns the hash of the latest execution block.
func (b *BeaconState) LatestBlockHash() ([32]byte, error) {
if b.version < version.Gloas {
return [32]byte{}, errNotSupported("LatestBlockHash", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if b.latestBlockHash == nil {
return [32]byte{}, nil
}
return [32]byte(b.latestBlockHash), nil
}
// BuilderPubkey returns the builder pubkey at the provided index.
func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
if b.version < version.Gloas {
return [fieldparams.BLSPubkeyLength]byte{}, errNotSupported("BuilderPubkey", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
builder, err := b.builderAtIndex(builderIndex)
if err != nil {
return [fieldparams.BLSPubkeyLength]byte{}, err
}
var pk [fieldparams.BLSPubkeyLength]byte
copy(pk[:], builder.Pubkey)
return pk, nil
}
// IsActiveBuilder returns true if the builder placement is finalized and it has not initiated exit.
// Spec v1.7.0-alpha.0 (pseudocode):
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
//
// builder = state.builders[builder_index]
// return (
// builder.deposit_epoch < state.finalized_checkpoint.epoch
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
// )
func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsActiveBuilder", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
builder, err := b.builderAtIndex(builderIndex)
if err != nil {
return false, err
}
finalizedEpoch := b.finalizedCheckpoint.Epoch
return builder.DepositEpoch < finalizedEpoch && builder.WithdrawableEpoch == params.BeaconConfig().FarFutureEpoch, nil
}
// CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
// Spec v1.7.0-alpha.0 (pseudocode):
// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
//
// builder_balance = state.builders[builder_index].balance
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
// if builder_balance < min_balance:
// return False
// return builder_balance - min_balance >= bid_amount
func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("CanBuilderCoverBid", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
builder, err := b.builderAtIndex(builderIndex)
if err != nil {
return false, err
}
pendingBalanceToWithdraw := b.builderPendingBalanceToWithdraw(builderIndex)
minBalance := params.BeaconConfig().MinDepositAmount + pendingBalanceToWithdraw
balance := uint64(builder.Balance)
if balance < minBalance {
return false, nil
}
return balance-minBalance >= uint64(bidAmount), nil
}
// builderAtIndex intentionally returns the underlying pointer without copying.
func (b *BeaconState) builderAtIndex(builderIndex primitives.BuilderIndex) (*ethpb.Builder, error) {
idx := uint64(builderIndex)
if idx >= uint64(len(b.builders)) {
return nil, fmt.Errorf("builder index %d out of range (len=%d)", builderIndex, len(b.builders))
}
builder := b.builders[idx]
if builder == nil {
return nil, fmt.Errorf("builder at index %d is nil", builderIndex)
}
return builder, nil
}
// builderPendingBalanceToWithdraw mirrors get_pending_balance_to_withdraw_for_builder in the spec,
// summing both pending withdrawals and pending payments for a builder.
func (b *BeaconState) builderPendingBalanceToWithdraw(builderIndex primitives.BuilderIndex) uint64 {
var total uint64
for _, withdrawal := range b.builderPendingWithdrawals {
if withdrawal.BuilderIndex == builderIndex {
total += uint64(withdrawal.Amount)
}
}
for _, payment := range b.builderPendingPayments {
if payment.Withdrawal.BuilderIndex == builderIndex {
total += uint64(payment.Withdrawal.Amount)
}
}
return total
}
// BuilderPendingPayments returns a copy of the builder pending payments.
func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
if b.version < version.Gloas {
return nil, errNotSupported("BuilderPendingPayments", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
return b.builderPendingPaymentsVal(), nil
}
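To make the `CanBuilderCoverBid` arithmetic concrete, here is a runnable sketch mirroring the spec pseudocode quoted above, using the same numbers as the test further down; the deposit constant is a stand-in, not the mainnet value:

```go
package main

import "fmt"

// canCoverSketch: the builder must retain MIN_DEPOSIT_AMOUNT plus all pending
// withdrawals and payments as a floor; only the excess can back a bid.
func canCoverSketch(balance, minDeposit, pending, bid uint64) bool {
	minBalance := minDeposit + pending
	if balance < minBalance {
		return false
	}
	return balance-minBalance >= bid
}

func main() {
	const minDeposit = 1_000_000_000 // stand-in for MIN_DEPOSIT_AMOUNT (gwei)
	balance := minDeposit + uint64(50)
	pending := uint64(10 + 15) // one pending withdrawal + one pending payment
	fmt.Println(canCoverSketch(balance, minDeposit, pending, 20)) // true: 50-25 >= 20
	fmt.Println(canCoverSketch(balance, minDeposit, pending, 30)) // false: 50-25 < 30
}
```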

View File

@@ -1,168 +0,0 @@
package state_native_test
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestLatestBlockHash(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
_, err := st.LatestBlockHash()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns zero hash when unset", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
got, err := st.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, [32]byte{}, got)
})
t.Run("returns configured hash", func(t *testing.T) {
hashBytes := bytes.Repeat([]byte{0xAB}, 32)
var want [32]byte
copy(want[:], hashBytes)
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
LatestBlockHash: hashBytes,
})
require.NoError(t, err)
got, err := st.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, want, got)
})
}
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.BuilderPubkey(0)
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns pubkey copy", func(t *testing.T) {
pubkey := bytes.Repeat([]byte{0xAA}, 48)
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Pubkey: pubkey,
Balance: 42,
DepositEpoch: 3,
WithdrawableEpoch: 4,
},
},
})
require.NoError(t, err)
gotPk, err := stIface.BuilderPubkey(0)
require.NoError(t, err)
var wantPk [48]byte
copy(wantPk[:], pubkey)
require.Equal(t, wantPk, gotPk)
// Mutate original to ensure copy.
pubkey[0] = 0
require.Equal(t, byte(0xAA), gotPk[0])
})
t.Run("out of range returns error", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{},
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
_, err = st.BuilderPubkey(1)
require.ErrorContains(t, "out of range", err)
})
}
func TestBuilderHelpers(t *testing.T) {
t.Run("is active builder", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: 10,
DepositEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
},
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1},
})
require.NoError(t, err)
active, err := st.IsActiveBuilder(0)
require.NoError(t, err)
require.Equal(t, true, active)
// Not active when withdrawable epoch is set.
stProto := &ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: 10,
DepositEpoch: 0,
WithdrawableEpoch: 1,
},
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2},
}
stInactive, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
active, err = stInactive.IsActiveBuilder(0)
require.NoError(t, err)
require.Equal(t, false, active)
})
t.Run("can builder cover bid", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: primitives.Gwei(params.BeaconConfig().MinDepositAmount + 50),
DepositEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
},
},
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{Amount: 10, BuilderIndex: 0},
},
BuilderPendingPayments: []*ethpb.BuilderPendingPayment{
{Withdrawal: &ethpb.BuilderPendingWithdrawal{Amount: 15, BuilderIndex: 0}},
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1},
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
ok, err := st.CanBuilderCoverBid(0, 20)
require.NoError(t, err)
require.Equal(t, true, ok)
ok, err = st.CanBuilderCoverBid(0, 30)
require.NoError(t, err)
require.Equal(t, false, ok)
})
}
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}

View File

@@ -725,13 +725,3 @@ func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
}
return pbState, nil
}
// ProtobufBeaconStateGloas transforms an input into beacon state Gloas in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
func ProtobufBeaconStateGloas(s any) (*ethpb.BeaconStateGloas, error) {
pbState, ok := s.(*ethpb.BeaconStateGloas)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateGloas")
}
return pbState, nil
}

View File

@@ -5,7 +5,6 @@ import (
"encoding/binary"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -40,51 +39,33 @@ func (b *BeaconState) NextSyncCommitteeGeneralizedIndex() (uint64, error) {
// CurrentSyncCommitteeProof from the state's Merkle trie representation.
func (b *BeaconState) CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
return b.ProofByFieldIndex(ctx, types.CurrentSyncCommittee)
b.lock.Lock()
defer b.lock.Unlock()
if b.version == version.Phase0 {
return nil, errNotSupported("CurrentSyncCommitteeProof", b.version)
}
// In case the Merkle layers of the trie are not populated, we need
// to perform some initialization.
if err := b.initializeMerkleLayers(ctx); err != nil {
return nil, err
}
// Our beacon state uses a "dirty" fields pattern which requires us to
// recompute branches of the Merkle layers that are marked as dirty.
if err := b.recomputeDirtyFields(ctx); err != nil {
return nil, err
}
return trie.ProofFromMerkleLayers(b.merkleLayers, types.CurrentSyncCommittee.RealPosition()), nil
}
// NextSyncCommitteeProof from the state's Merkle trie representation.
func (b *BeaconState) NextSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
return b.ProofByFieldIndex(ctx, types.NextSyncCommittee)
}
// FinalizedRootProof crafts a Merkle proof for the finalized root
// contained within the finalized checkpoint of a beacon state.
func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error) {
b.lock.Lock()
defer b.lock.Unlock()
branchProof, err := b.proofByFieldIndex(ctx, types.FinalizedCheckpoint)
if err != nil {
return nil, err
}
// The epoch field of a finalized checkpoint is the neighbor of the
// finalized root field in the checkpoint's Merkle tree representation.
// This neighbor is the first element added to the proof.
epochBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(epochBuf, uint64(b.finalizedCheckpointVal().Epoch))
epochRoot := bytesutil.ToBytes32(epochBuf)
proof := make([][]byte, 0)
proof = append(proof, epochRoot[:])
proof = append(proof, branchProof...)
return proof, nil
}
// ProofByFieldIndex constructs proofs for given field index with lock acquisition.
func (b *BeaconState) ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.proofByFieldIndex(ctx, f)
}
// proofByFieldIndex constructs proofs for given field index.
// Important: it is assumed that beacon state mutex is locked when calling this method.
func (b *BeaconState) proofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
err := b.validateFieldIndex(f)
if err != nil {
return nil, err
if b.version == version.Phase0 {
return nil, errNotSupported("NextSyncCommitteeProof", b.version)
}
if err := b.initializeMerkleLayers(ctx); err != nil {
@@ -93,40 +74,35 @@ func (b *BeaconState) proofByFieldIndex(ctx context.Context, f types.FieldIndex)
if err := b.recomputeDirtyFields(ctx); err != nil {
return nil, err
}
return trie.ProofFromMerkleLayers(b.merkleLayers, f.RealPosition()), nil
return trie.ProofFromMerkleLayers(b.merkleLayers, types.NextSyncCommittee.RealPosition()), nil
}
func (b *BeaconState) validateFieldIndex(f types.FieldIndex) error {
switch b.version {
case version.Phase0:
if f.RealPosition() > params.BeaconConfig().BeaconStateFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Altair:
if f.RealPosition() > params.BeaconConfig().BeaconStateAltairFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Bellatrix:
if f.RealPosition() > params.BeaconConfig().BeaconStateBellatrixFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Capella:
if f.RealPosition() > params.BeaconConfig().BeaconStateCapellaFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Deneb:
if f.RealPosition() > params.BeaconConfig().BeaconStateDenebFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Electra:
if f.RealPosition() > params.BeaconConfig().BeaconStateElectraFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
case version.Fulu:
if f.RealPosition() > params.BeaconConfig().BeaconStateFuluFieldCount-1 {
return errNotSupported(f.String(), b.version)
}
// FinalizedRootProof crafts a Merkle proof for the finalized root
// contained within the finalized checkpoint of a beacon state.
func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.version == version.Phase0 {
return nil, errNotSupported("FinalizedRootProof", b.version)
}
return nil
if err := b.initializeMerkleLayers(ctx); err != nil {
return nil, err
}
if err := b.recomputeDirtyFields(ctx); err != nil {
return nil, err
}
cpt := b.finalizedCheckpointVal()
// The epoch field of a finalized checkpoint is the neighbor of the
// finalized root field in the checkpoint's Merkle tree representation.
// This neighbor is the first element added to the proof.
epochBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(epochBuf, uint64(cpt.Epoch))
epochRoot := bytesutil.ToBytes32(epochBuf)
proof := make([][]byte, 0)
proof = append(proof, epochRoot[:])
branch := trie.ProofFromMerkleLayers(b.merkleLayers, types.FinalizedCheckpoint.RealPosition())
proof = append(proof, branch...)
return proof, nil
}
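Both the old and new versions build the finalized-root proof by prepending the epoch leaf to the state-level branch. A generic sketch of how such a proof is consumed, as a plain SHA-256 fold; the real `trie` package and generalized indices may differ in detail:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// fold hashes a leaf up a Merkle branch; the parity of the leaf's index at
// each level decides whether the sibling goes on the left or the right.
// The epoch leaf above is simply the first sibling in such a branch.
func fold(leaf [32]byte, branch [][32]byte, index uint64) [32]byte {
	node := leaf
	for _, sib := range branch {
		if index%2 == 0 {
			node = sha256.Sum256(append(node[:], sib[:]...))
		} else {
			node = sha256.Sum256(append(sib[:], node[:]...))
		}
		index /= 2
	}
	return node
}

func main() {
	leaf := sha256.Sum256([]byte("finalized root"))
	epoch := sha256.Sum256([]byte("epoch"))
	fmt.Printf("%x\n", fold(leaf, [][32]byte{epoch}, 0))
}
```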

View File

@@ -21,6 +21,10 @@ func TestBeaconStateMerkleProofs_phase0_notsupported(t *testing.T) {
_, err := st.NextSyncCommitteeProof(ctx)
require.ErrorContains(t, "not supported", err)
})
t.Run("finalized root", func(t *testing.T) {
_, err := st.FinalizedRootProof(ctx)
require.ErrorContains(t, "not supported", err)
})
}
func TestBeaconStateMerkleProofs_altair(t *testing.T) {
ctx := t.Context()

View File

@@ -1,163 +0,0 @@
package state_native
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
)
// RotateBuilderPendingPayments rotates the queue by dropping SLOTS_PER_EPOCH payments from the
// front and appending SLOTS_PER_EPOCH empty payments to the end.
// This implements: state.builder_pending_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
func (b *BeaconState) RotateBuilderPendingPayments() error {
if b.version < version.Gloas {
return errNotSupported("RotateBuilderPendingPayments", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])
for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
b.builderPendingPayments[i] = emptyBuilderPendingPayment
}
b.markFieldAsDirty(types.BuilderPendingPayments)
b.rebuildTrie[types.BuilderPendingPayments] = true
return nil
}
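The copy-then-zero rotation above is easier to see on a toy slice; a runnable sketch where 4 stands in for SLOTS_PER_EPOCH:

```go
package main

import "fmt"

func main() {
	// Shift the second epoch's entries to the front, then zero the tail,
	// exactly as RotateBuilderPendingPayments does on the payments queue.
	const slotsPerEpoch = 4
	q := []int{1, 2, 3, 4, 5, 6, 7, 8}
	copy(q[:slotsPerEpoch], q[slotsPerEpoch:])
	for i := slotsPerEpoch; i < len(q); i++ {
		q[i] = 0
	}
	fmt.Println(q) // [5 6 7 8 0 0 0 0]
}
```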
// emptyBuilderPendingPayment is a shared zero-value payment used to clear entries.
var emptyBuilderPendingPayment = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
// If the state's pending withdrawals slice is shared with another state copy, it is copied first so the shared backing array is not mutated.
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
if b.version < version.Gloas {
return errNotSupported("AppendBuilderPendingWithdrawals", b.version)
}
if len(withdrawals) == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
pendingWithdrawals := b.builderPendingWithdrawals
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
pendingWithdrawals = make([]*ethpb.BuilderPendingWithdrawal, 0, len(b.builderPendingWithdrawals)+len(withdrawals))
pendingWithdrawals = append(pendingWithdrawals, b.builderPendingWithdrawals...)
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
}
b.builderPendingWithdrawals = append(pendingWithdrawals, withdrawals...)
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
return nil
}
// SetExecutionPayloadBid sets the latest execution payload bid in the state.
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
if b.version < version.Gloas {
return errNotSupported("SetExecutionPayloadBid", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
parentBlockHash := h.ParentBlockHash()
parentBlockRoot := h.ParentBlockRoot()
blockHash := h.BlockHash()
randao := h.PrevRandao()
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
feeRecipient := h.FeeRecipient()
b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
FeeRecipient: feeRecipient[:],
}
b.markFieldAsDirty(types.LatestExecutionPayloadBid)
return nil
}
// ClearBuilderPendingPayment clears a builder pending payment at the specified index.
func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
if b.version < version.Gloas {
return errNotSupported("ClearBuilderPendingPayment", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
if uint64(index) >= uint64(len(b.builderPendingPayments)) {
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
}
b.builderPendingPayments[index] = emptyBuilderPendingPayment
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
if b.version < version.Gloas {
return errNotSupported("SetBuilderPendingPayment", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
if uint64(index) >= uint64(len(b.builderPendingPayments)) {
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
}
b.builderPendingPayments[index] = ethpb.CopyBuilderPendingPayment(payment)
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}
// UpdateExecutionPayloadAvailabilityAtIndex updates the execution payload availability bit at a specific index.
func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error {
b.lock.Lock()
defer b.lock.Unlock()
byteIndex := idx / 8
bitIndex := idx % 8
if byteIndex >= uint64(len(b.executionPayloadAvailability)) {
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", idx, byteIndex, len(b.executionPayloadAvailability))
}
if val != 0 {
b.executionPayloadAvailability[byteIndex] |= (1 << bitIndex)
} else {
b.executionPayloadAvailability[byteIndex] &^= (1 << bitIndex)
}
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
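The bitfield addressing above (byte `idx/8`, bit `idx%8`, `|=` to set, `&^=` to clear) in a runnable sketch, using the same indices as the test below:

```go
package main

import "fmt"

func main() {
	var bits [2]byte
	idx := uint64(9)               // byte 1, bit 1
	bits[idx/8] |= 1 << (idx % 8)  // set
	fmt.Printf("%08b\n", bits[1])  // 00000010
	bits[idx/8] &^= 1 << (idx % 8) // clear with Go's AND NOT
	fmt.Printf("%08b\n", bits[1])  // 00000000
}
```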

View File

@@ -1,330 +0,0 @@
package state_native
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
type testExecutionPayloadBid struct {
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitmentsRoot [32]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
}
func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
func (t testExecutionPayloadBid) ParentBlockRoot() [32]byte { return t.parentBlockRoot }
func (t testExecutionPayloadBid) PrevRandao() [32]byte { return t.prevRandao }
func (t testExecutionPayloadBid) BlockHash() [32]byte { return t.blockHash }
func (t testExecutionPayloadBid) GasLimit() uint64 { return t.gasLimit }
func (t testExecutionPayloadBid) BuilderIndex() primitives.BuilderIndex {
return t.builderIndex
}
func (t testExecutionPayloadBid) Slot() primitives.Slot { return t.slot }
func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func TestSetExecutionPayloadBid(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetExecutionPayloadBid(testExecutionPayloadBid{})
require.ErrorContains(t, "is not supported", err)
})
t.Run("sets bid and marks dirty", func(t *testing.T) {
var (
parentBlockHash = [32]byte(bytes.Repeat([]byte{0xAB}, 32))
parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
blockHash = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
prevRandao = [32]byte(bytes.Repeat([]byte{0x11}, 32))
blobRoot = [32]byte(bytes.Repeat([]byte{0x22}, 32))
feeRecipient [20]byte
)
copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
bid := testExecutionPayloadBid{
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitmentsRoot: blobRoot,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
}
require.NoError(t, st.SetExecutionPayloadBid(bid))
require.NotNil(t, st.latestExecutionPayloadBid)
require.DeepEqual(t, parentBlockHash[:], st.latestExecutionPayloadBid.ParentBlockHash)
require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
require.Equal(t, primitives.Slot(9), st.latestExecutionPayloadBid.Slot)
require.Equal(t, primitives.Gwei(11), st.latestExecutionPayloadBid.Value)
require.Equal(t, primitives.Gwei(22), st.latestExecutionPayloadBid.ExecutionPayment)
require.Equal(t, true, st.dirtyFields[types.LatestExecutionPayloadBid])
})
}
func TestSetBuilderPendingPayment(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetBuilderPendingPayment(0, &ethpb.BuilderPendingPayment{})
require.ErrorContains(t, "is not supported", err)
})
t.Run("sets copy and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
}
payment := &ethpb.BuilderPendingPayment{
Weight: 2,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
Amount: 99,
BuilderIndex: 1,
},
}
require.NoError(t, st.SetBuilderPendingPayment(1, payment))
require.DeepEqual(t, payment, st.builderPendingPayments[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
// Mutating the original should not affect the state copy.
payment.Withdrawal.Amount = 12345
require.Equal(t, primitives.Gwei(99), st.builderPendingPayments[1].Withdrawal.Amount)
})
t.Run("returns error on out of range index", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
}
err := st.SetBuilderPendingPayment(2, &ethpb.BuilderPendingPayment{})
require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
})
}
func TestClearBuilderPendingPayment(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.ClearBuilderPendingPayment(0)
require.ErrorContains(t, "is not supported", err)
})
t.Run("clears and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
}
st.builderPendingPayments[1] = &ethpb.BuilderPendingPayment{
Weight: 2,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
Amount: 99,
BuilderIndex: 1,
},
}
require.NoError(t, st.ClearBuilderPendingPayment(1))
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
})
t.Run("returns error on out of range index", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
}
err := st.ClearBuilderPendingPayment(2)
require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
})
}
func TestRotateBuilderPendingPayments(t *testing.T) {
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
for i := range payments {
idx := uint64(i)
payments[i] = &ethpb.BuilderPendingPayment{
Weight: primitives.Gwei(idx * 100e9),
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: primitives.Gwei(idx * 1e9),
BuilderIndex: primitives.BuilderIndex(idx + 100),
},
}
}
statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingPayments: payments,
})
require.NoError(t, err)
st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)
oldPayments, err := st.BuilderPendingPayments()
require.NoError(t, err)
require.NoError(t, st.RotateBuilderPendingPayments())
newPayments, err := st.BuilderPendingPayments()
require.NoError(t, err)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
for i := range slotsPerEpoch {
require.DeepEqual(t, oldPayments[slotsPerEpoch+i], newPayments[i])
}
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
payment := newPayments[i]
require.Equal(t, primitives.Gwei(0), payment.Weight)
require.Equal(t, 20, len(payment.Withdrawal.FeeRecipient))
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
require.Equal(t, primitives.BuilderIndex(0), payment.Withdrawal.BuilderIndex)
}
}
func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
st := &BeaconState{version: version.Electra}
err := st.RotateBuilderPendingPayments()
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
wd := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: 1,
BuilderIndex: 2,
}
statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{wd},
})
require.NoError(t, err)
st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)
copied := st.Copy().(*BeaconState)
require.Equal(t, uint(2), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
appended := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: 4,
BuilderIndex: 5,
}
require.NoError(t, copied.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{appended}))
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, 2, len(copied.builderPendingWithdrawals))
require.DeepEqual(t, wd, copied.builderPendingWithdrawals[0])
require.DeepEqual(t, appended, copied.builderPendingWithdrawals[1])
require.DeepEqual(t, wd, st.builderPendingWithdrawals[0])
require.Equal(t, uint(1), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
require.Equal(t, uint(1), copied.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
}
func TestAppendBuilderPendingWithdrawals(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
}
first := &ethpb.BuilderPendingWithdrawal{Amount: 1}
second := &ethpb.BuilderPendingWithdrawal{Amount: 2}
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{first, second}))
require.Equal(t, 2, len(st.builderPendingWithdrawals))
require.DeepEqual(t, first, st.builderPendingWithdrawals[0])
require.DeepEqual(t, second, st.builderPendingWithdrawals[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
}
func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
st := &BeaconState{version: version.Electra}
err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
}
func TestUpdateExecutionPayloadAvailabilityAtIndex_SetAndClear(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))
otherIdx := uint64(8) // byte 1, bit 0
idx := uint64(9) // byte 1, bit 1
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(otherIdx, 1))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1))
require.Equal(t, byte(0x03), st.executionPayloadAvailability[1])
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 0))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
}
func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))
idx := uint64(len(st.executionPayloadAvailability)) * 8
err := st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1)
require.ErrorContains(t, "out of range", err)
for _, b := range st.executionPayloadAvailability {
if b != 0 {
t.Fatalf("execution payload availability mutated on error")
}
}
}
func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
t.Helper()
st, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
ExecutionPayloadAvailability: availability,
})
require.NoError(t, err)
return st.(*BeaconState)
}

View File

@@ -29,7 +29,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -69,14 +68,11 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",

View File

@@ -5,13 +5,10 @@ import (
"encoding/hex"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,10 +25,6 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
s.migrationLock.Lock()
defer s.migrationLock.Unlock()
if features.Get().EnableStateDiff {
return s.migrateToColdHdiff(ctx, fRoot)
}
s.finalizedInfo.lock.RLock()
oldFSlot := s.finalizedInfo.slot
s.finalizedInfo.lock.RUnlock()
@@ -97,8 +90,21 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
}
}
}
if s.beaconDB.HasState(ctx, aRoot) {
s.migrateHotToCold(aRoot)
// If you are migrating a state and it's already part of the hot state cache saved to the db,
// you can just remove it from the hot state cache as it becomes redundant.
s.saveHotStateDB.lock.Lock()
roots := s.saveHotStateDB.blockRootsOfSavedStates
for i := range roots {
if aRoot == roots[i] {
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
// Break here is ok.
break
}
}
s.saveHotStateDB.lock.Unlock()
continue
}
@@ -123,103 +129,3 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
return nil
}
// migrateToColdHdiff saves the state-diffs for slots that are in the state diff tree after finalization
func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
s.finalizedInfo.lock.RLock()
oldFSlot := s.finalizedInfo.slot
s.finalizedInfo.lock.RUnlock()
fSlot, err := s.beaconDB.SlotByBlockRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get slot by block root")
}
for slot := oldFSlot; slot < fSlot; slot++ {
if ctx.Err() != nil {
return ctx.Err()
}
_, lvl, err := s.beaconDB.SlotInDiffTree(slot)
if err != nil {
log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
continue
}
if lvl == -1 {
continue
}
// The state needs to be saved.
// Try the epoch boundary cache first.
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
if err != nil {
log.WithError(err).Errorf("could not get epoch boundary state for slot %d", slot)
cached = nil
exists = false
}
var aRoot [32]byte
var aState state.BeaconState
if exists {
aRoot = cached.root
aState = cached.state
} else {
_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
if err != nil {
return err
}
// Since the block has been finalized, the db should not have more than one block at a given slot.
// We should error out when this happens.
if len(roots) != 1 {
return errUnknownBlock
}
aRoot = roots[0]
// Unlike the legacy MigrateToCold, we always need to fetch the state, even if
// it exists in the DB as part of the hot state db, because we need to process
// slots up to the state diff tree slots.
aState, err = s.StateByRoot(ctx, aRoot)
if err != nil {
return err
}
}
if s.beaconDB.HasState(ctx, aRoot) {
s.migrateHotToCold(aRoot)
continue
}
// Advance the state to the target slot.
if aState.Slot() < slot {
aState, err = transition.ProcessSlots(ctx, aState, slot)
if err != nil {
return errors.Wrapf(err, "could not process slots to slot %d", slot)
}
}
if err := s.beaconDB.SaveState(ctx, aState, aRoot); err != nil {
return err
}
log.WithFields(
logrus.Fields{
"slot": aState.Slot(),
"root": fmt.Sprintf("%#x", aRoot),
}).Info("Saved state in DB")
}
// Update finalized info in memory.
fInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(fRoot)
if err != nil {
return err
}
if ok {
s.SaveFinalizedState(fSlot, fRoot, fInfo.state)
}
return nil
}
func (s *State) migrateHotToCold(aRoot [32]byte) {
// If the state being migrated is already part of the hot state cache saved to the db,
// it can simply be removed from the hot state cache as it becomes redundant.
s.saveHotStateDB.lock.Lock()
roots := s.saveHotStateDB.blockRootsOfSavedStates
for i := range roots {
if aRoot == roots[i] {
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`,
// so breaking here is safe.
break
}
}
s.saveHotStateDB.lock.Unlock()
}

View File

@@ -4,11 +4,8 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -227,170 +224,3 @@ func TestMigrateToCold_ParallelCalls(t *testing.T) {
assert.DeepEqual(t, [][32]byte{r7}, service.saveHotStateDB.blockRootsOfSavedStates, "Did not remove all saved hot state roots")
require.LogsContain(t, hook, "Saved state in DB")
}
// =========================================================================
// Tests for migrateToColdHdiff (state diff migration)
// =========================================================================
// setStateDiffExponents sets state diff exponents for testing.
// Uses exponents [6, 5] which means:
// - Level 0: Every 2^6 = 64 slots (full snapshot)
// - Level 1: Every 2^5 = 32 slots (diff)
func setStateDiffExponents() {
globalFlags := flags.GlobalFlags{
StateDiffExponents: []int{6, 5},
}
flags.Init(&globalFlags)
}
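For intuition, here is a minimal, hypothetical sketch (not Prysm's actual `SlotInDiffTree` implementation) of how a slot could map to a diff-tree level under exponents [6, 5]: a slot lands on the first level whose period (2^exponent) divides it, and -1 means the slot is on no level. This is consistent with the tests below, where slot 0 is in the tree while slot 20 is not.

```go
package main

import "fmt"

// diffTreeLevel returns the level of slot in a diff tree with the given
// exponents, or -1 if the slot is on no level. Level i has period 2^exponents[i].
func diffTreeLevel(slot uint64, exponents []int) int {
	for lvl, exp := range exponents {
		if slot%(uint64(1)<<exp) == 0 {
			return lvl
		}
	}
	return -1
}

func main() {
	for _, s := range []uint64{0, 20, 32, 64} {
		fmt.Printf("slot %2d -> level %d\n", s, diffTreeLevel(s, []int{6, 5}))
	}
	// slot  0 -> level 0  (full snapshot)
	// slot 20 -> level -1 (not in the tree)
	// slot 32 -> level 1  (diff)
	// slot 64 -> level 0  (full snapshot)
}
```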
// TestMigrateToColdHdiff_CanUpdateFinalizedInfo verifies that the migration
// correctly updates finalized info when migrating to slots not in the diff tree.
func TestMigrateToColdHdiff_CanUpdateFinalizedInfo(t *testing.T) {
ctx := t.Context()
// Set exponents and create DB first (without EnableStateDiff flag).
setStateDiffExponents()
beaconDB := testDB.SetupDB(t)
// Initialize the state diff cache via the method on *kv.Store (not in interface).
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
// Now enable the feature flag.
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
service := New(beaconDB, doublylinkedtree.New())
beaconState, _ := util.DeterministicGenesisState(t, 32)
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Put genesis state in epoch boundary cache so migrateToColdHdiff doesn't need to retrieve from DB.
require.NoError(t, service.epochBoundaryStateCache.put(gRoot, beaconState))
// Set initial finalized info at genesis.
service.finalizedInfo = &finalizedInfo{
slot: 0,
root: gRoot,
state: beaconState,
}
// Create finalized block at slot 10 (not in diff tree, so no intermediate states saved).
finalizedState := beaconState.Copy()
require.NoError(t, finalizedState.SetSlot(10))
b := util.NewBeaconBlock()
b.Block.Slot = 10
fRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
require.NoError(t, service.MigrateToCold(ctx, fRoot))
// Verify finalized info is updated.
assert.Equal(t, primitives.Slot(10), service.finalizedInfo.slot)
assert.DeepEqual(t, fRoot, service.finalizedInfo.root)
expectedHTR, err := finalizedState.HashTreeRoot(ctx)
require.NoError(t, err)
actualHTR, err := service.finalizedInfo.state.HashTreeRoot(ctx)
require.NoError(t, err)
assert.DeepEqual(t, expectedHTR, actualHTR)
}
// TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree verifies that the migration
// skips slots that are not in the diff tree.
func TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree(t *testing.T) {
hook := logTest.NewGlobal()
ctx := t.Context()
// Set exponents and create DB first (without EnableStateDiff flag).
setStateDiffExponents()
beaconDB := testDB.SetupDB(t)
// Initialize the state diff cache via the method on *kv.Store (not in interface).
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
// Now enable the feature flag.
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
service := New(beaconDB, doublylinkedtree.New())
beaconState, pks := util.DeterministicGenesisState(t, 32)
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Start from slot 1 to avoid slot 0 which is in the diff tree.
service.finalizedInfo = &finalizedInfo{
slot: 1,
root: gRoot,
state: beaconState,
}
// Reset the log hook to ignore setup logs.
hook.Reset()
// Create a block at slot 20 (NOT in diff tree with exponents [6,5]).
b20, err := util.GenerateFullBlock(beaconState, pks, util.DefaultBlockGenConfig(), 20)
require.NoError(t, err)
r20, err := b20.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b20)
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 20, Root: r20[:]}))
// Put finalized state in cache.
finalizedState := beaconState.Copy()
require.NoError(t, finalizedState.SetSlot(20))
require.NoError(t, service.epochBoundaryStateCache.put(r20, finalizedState))
require.NoError(t, service.MigrateToCold(ctx, r20))
// Verify NO states were saved during migration (slots 1-19 are not in diff tree).
assert.LogsDoNotContain(t, hook, "Saved state in DB")
}
// TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced verifies that
// migration is a no-op when the finalized slot has not advanced.
func TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced(t *testing.T) {
ctx := t.Context()
// Set exponents and create DB first (without EnableStateDiff flag).
setStateDiffExponents()
beaconDB := testDB.SetupDB(t)
// Initialize the state diff cache via the method on *kv.Store (not in interface).
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
// Now enable the feature flag.
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
service := New(beaconDB, doublylinkedtree.New())
beaconState, _ := util.DeterministicGenesisState(t, 32)
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Set finalized info already at slot 50.
finalizedState := beaconState.Copy()
require.NoError(t, finalizedState.SetSlot(50))
service.finalizedInfo = &finalizedInfo{
slot: 50,
root: gRoot,
state: finalizedState,
}
// Create block at same slot 50.
b := util.NewBeaconBlock()
b.Block.Slot = 50
fRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
// Migration should be a no-op (finalized slot not advancing).
require.NoError(t, service.MigrateToCold(ctx, fRoot))
}

View File

@@ -238,9 +238,14 @@ func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte
// AddInMixin describes a method from which a length mixin is added to the
// provided root.
func AddInMixin(root [32]byte, length uint64) ([32]byte, error) {
var rootBufRoot [32]byte
binary.LittleEndian.PutUint64(rootBufRoot[:], length)
return ssz.MixInLength(root, rootBufRoot[:]), nil
rootBuf := new(bytes.Buffer)
if err := binary.Write(rootBuf, binary.LittleEndian, length); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal eth1data votes length")
}
// We need to mix in the length of the slice.
rootBufRoot := make([]byte, 32)
copy(rootBufRoot, rootBuf.Bytes())
return ssz.MixInLength(root, rootBufRoot), nil
}
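Both versions of `AddInMixin` above build the same 32-byte little-endian length buffer; for context, a minimal sketch of what SSZ length mixing then computes, assuming the standard definition `mix_in_length(root, length) = hash(root ++ uint256_le(length))`:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInLength hashes the root together with the length encoded as a 32-byte
// little-endian value (the upper 24 bytes stay zero).
func mixInLength(root [32]byte, length uint64) [32]byte {
	var buf [64]byte
	copy(buf[:32], root[:])
	binary.LittleEndian.PutUint64(buf[32:40], length)
	return sha256.Sum256(buf[:])
}

func main() {
	var root [32]byte // e.g. the Merkle root of a list's elements
	fmt.Printf("%#x\n", mixInLength(root, 5))
}
```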
// Merkleize 32-byte leaves into a Merkle trie for its adequate depth, returning

View File

@@ -148,7 +148,7 @@ func (b batch) ensureParent(expected [32]byte) error {
func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
return &eth.BeaconBlocksByRangeRequest{
StartSlot: b.begin,
Count: uint64(b.end.FlooredSubSlot(b.begin)),
Count: uint64(b.end - b.begin),
Step: 1,
}
}
@@ -156,7 +156,7 @@ func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
return &eth.BlobSidecarsByRangeRequest{
StartSlot: b.begin,
Count: uint64(b.end.FlooredSubSlot(b.begin)),
Count: uint64(b.end - b.begin),
}
}
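The (removed) tests in the next file pin down the semantics that `FlooredSubSlot` provides and that the plain `end - begin` restored here loses: a saturating subtraction that clamps at zero instead of wrapping around on unsigned underflow. A minimal sketch of that behavior:

```go
package main

import "fmt"

// flooredSub returns end-begin clamped at zero, so a begin > end caller bug
// yields an empty request instead of a near-2^64 count after unsigned wraparound.
func flooredSub(end, begin uint64) uint64 {
	if end < begin {
		return 0
	}
	return end - begin
}

func main() {
	fmt.Println(flooredSub(200, 100)) // 100
	fmt.Println(flooredSub(100, 200)) // 0, not 18446744073709551516
}
```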

View File

@@ -10,93 +10,6 @@ import (
"github.com/pkg/errors"
)
func TestBlockRequest(t *testing.T) {
cases := []struct {
name string
begin primitives.Slot
end primitives.Slot
expectedCount uint64
}{
{
name: "normal case",
begin: 100,
end: 200,
expectedCount: 100,
},
{
name: "end equals begin",
begin: 100,
end: 100,
expectedCount: 0,
},
{
name: "end less than begin (would underflow without check)",
begin: 200,
end: 100,
expectedCount: 0,
},
{
name: "zero values",
begin: 0,
end: 0,
expectedCount: 0,
},
{
name: "single slot",
begin: 0,
end: 1,
expectedCount: 1,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
b := batch{begin: tc.begin, end: tc.end}
req := b.blockRequest()
require.Equal(t, tc.expectedCount, req.Count)
require.Equal(t, tc.begin, req.StartSlot)
require.Equal(t, uint64(1), req.Step)
})
}
}
func TestBlobRequest(t *testing.T) {
cases := []struct {
name string
begin primitives.Slot
end primitives.Slot
expectedCount uint64
}{
{
name: "normal case",
begin: 100,
end: 200,
expectedCount: 100,
},
{
name: "end equals begin",
begin: 100,
end: 100,
expectedCount: 0,
},
{
name: "end less than begin (would underflow without check)",
begin: 200,
end: 100,
expectedCount: 0,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
b := batch{begin: tc.begin, end: tc.end}
req := b.blobRequest()
require.Equal(t, tc.expectedCount, req.Count)
require.Equal(t, tc.begin, req.StartSlot)
})
}
}
func TestSortBatchDesc(t *testing.T) {
orderIn := []primitives.Slot{100, 10000, 1}
orderOut := []primitives.Slot{10000, 100, 1}

View File

@@ -4,6 +4,9 @@ import (
"context"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -53,6 +56,32 @@ func (s *Service) verifierRoutine() {
}
}
// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
for {
kzgBatch := make([]*kzgVerifier, 0, 1)
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
}
for {
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
continue
default:
verifyKzgBatch(kzgBatch)
}
break
}
}
}
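The routine above uses a common Go batching pattern: block until one request arrives, then drain everything already queued (the `default` arm ends the inner loop) and verify the whole batch in a single call. When the pooled verification fails, `validateWithKzgBatchVerifier` below falls back to verifying only its own columns, so one peer's invalid column cannot reject everyone else's. A minimal, generic sketch of the drain-then-process pattern:

```go
package main

import "fmt"

// drainAndProcess blocks for the first item, then collects everything already
// buffered in ch without blocking, and hands the whole batch to process.
func drainAndProcess[T any](ch <-chan T, done <-chan struct{}, process func([]T)) {
	for {
		batch := make([]T, 0, 1)
		select {
		case <-done:
			return
		case item := <-ch:
			batch = append(batch, item)
		}
	drain:
		for {
			select {
			case <-done:
				return
			case item := <-ch:
				batch = append(batch, item)
			default:
				break drain // nothing else queued; process what we have
			}
		}
		process(batch)
	}
}

func main() {
	ch := make(chan int, 8)
	done := make(chan struct{})
	for i := 1; i <= 5; i++ {
		ch <- i
	}
	go drainAndProcess(ch, done, func(b []int) {
		fmt.Println("batch:", b) // batch: [1 2 3 4 5]
		close(done)
	})
	<-done
}
```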
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
@@ -125,3 +154,71 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
return aggSet, nil
}
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
defer span.End()
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
resChan := make(chan error, 1)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err()
}
select {
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
case err := <-resChan:
if err != nil {
log.WithError(err).Trace("Could not perform batch verification")
tracing.AnnotateError(span, err)
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
}
}
return pubsub.ValidationAccept, nil
}
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
defer span.End()
start := time.Now()
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
err = errors.Wrap(err, "could not verify")
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
return pubsub.ValidationAccept, nil
}
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
if len(kzgBatch) == 0 {
return
}
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
for _, kzgVerifier := range kzgBatch {
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
}
var verificationErr error
start := time.Now()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
} else {
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
}
// Send the same result to all verifiers in the batch
for _, verifier := range kzgBatch {
verifier.resChan <- verificationErr
}
}

View File

@@ -10,72 +10,20 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var nilFinalizedStateError = errors.New("finalized state is nil")
func (s *Service) maintainCustodyInfo() error {
// Rationale of slot choice:
// - If syncing with an empty DB from genesis, then justifiedSlot = finalizedSlot = 0,
// and the node starts to sync from slot 0 ==> Using justifiedSlot is correct.
// - If syncing with an empty DB from a checkpoint, then justifiedSlot = finalizedSlot = checkpointSlot,
// and the node starts to sync from checkpointSlot ==> Using justifiedSlot is correct.
// - If syncing with a non-empty DB, then justifiedSlot > finalizedSlot,
// and the node starts to sync from justifiedSlot + 1 ==> Using justifiedSlot + 1 is correct.
func (s *Service) maintainCustodyInfo() {
const interval = 1 * time.Minute
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "finalized checkpoint")
}
if finalizedCheckpoint == nil {
return errors.New("finalized checkpoint is nil")
}
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
return errors.Wrap(err, "epoch start for finalized slot")
}
justifiedCheckpoint, err := s.cfg.beaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "justified checkpoint")
}
if justifiedCheckpoint == nil {
return errors.New("justified checkpoint is nil")
}
justifiedSlot, err := slots.EpochStart(justifiedCheckpoint.Epoch)
if err != nil {
return errors.Wrap(err, "epoch start for justified slot")
}
slot := justifiedSlot
if justifiedSlot > finalizedSlot {
slot++
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(slot)
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.p2p.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
async.RunEvery(s.ctx, interval, func() {
if err := s.updateCustodyInfoIfNeeded(); err != nil {
log.WithError(err).Error("Failed to update custody info")
}
})
return nil
}
func (s *Service) updateCustodyInfoIfNeeded() error {

View File

@@ -668,7 +668,7 @@ func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
missStr := make([]string, 0, len(missing))
for _, k := range missing {
for k := range missing {
missStr = append(missStr, fmt.Sprintf("%#x", k))
}
return errors.Wrapf(errMissingBlobsForBlockCommitments,

View File

@@ -226,6 +226,8 @@ func (s *Service) Start() {
// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
@@ -258,7 +260,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockVersion := roBlock.Version()
if blockVersion >= version.Fulu {
if err := s.fetchOriginDataColumnSidecars(roBlock); err != nil {
if err := s.fetchOriginDataColumnSidecars(roBlock, delay); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
@@ -412,7 +414,7 @@ func (s *Service) fetchOriginBlobSidecars(pids []peer.ID, rob blocks.ROBlock) er
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}
func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay time.Duration) error {
const (
errorMessage = "Failed to fetch origin data column sidecars"
warningIteration = 10
@@ -499,6 +501,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
log := log.WithFields(logrus.Fields{
"attempt": attempt,
"missingIndices": helpers.SortedPrettySliceFromMap(missingIndicesByRoot[root]),
"delay": delay,
})
logFunc := log.Debug

View File

@@ -687,7 +687,10 @@ func TestFetchOriginColumns(t *testing.T) {
cfg.BlobSchedule = []params.BlobScheduleEntry{{Epoch: 0, MaxBlobsPerBlock: 10}}
params.OverrideBeaconConfig(cfg)
const blobCount = 1
const (
delay = 0
blobCount = 1
)
t.Run("block has no commitments", func(t *testing.T) {
service := new(Service)
@@ -699,7 +702,7 @@ func TestFetchOriginColumns(t *testing.T) {
roBlock, err := blocks.NewROBlock(signedBlock)
require.NoError(t, err)
err = service.fetchOriginDataColumnSidecars(roBlock)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)
})
@@ -721,7 +724,7 @@ func TestFetchOriginColumns(t *testing.T) {
err := storage.Save(verifiedSidecars)
require.NoError(t, err)
err = service.fetchOriginDataColumnSidecars(roBlock)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)
})
@@ -826,7 +829,7 @@ func TestFetchOriginColumns(t *testing.T) {
attempt++
})
err = service.fetchOriginDataColumnSidecars(roBlock)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)
// Check all corresponding sidecars are saved in the store.

View File

@@ -1,14 +1,339 @@
package sync
import (
"context"
"sync"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
func TestValidateWithKzgBatchVerifier(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
tests := []struct {
name string
dataColumns []blocks.RODataColumn
expectedResult pubsub.ValidationResult
expectError bool
}{
{
name: "single valid data column",
dataColumns: createValidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "multiple valid data columns",
dataColumns: createValidTestDataColumns(t, 3),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "single invalid data column",
dataColumns: createInvalidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationReject,
expectError: true,
},
{
name: "empty data column slice",
dataColumns: []blocks.RODataColumn{},
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)
require.Equal(t, tt.expectedResult, result)
if tt.expectError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestVerifierRoutine(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("processes single request", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
})
t.Run("batches multiple requests", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
const numRequests = 5
resChans := make([]chan error, numRequests)
for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
}
for i := range numRequests {
select {
case err := <-resChans[i]:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
}
})
t.Run("context cancellation stops routine", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
routineDone := make(chan struct{})
go func() {
service.kzgVerifierRoutine()
close(routineDone)
}()
cancel()
select {
case <-routineDone:
case <-time.After(time.Second):
t.Fatal("timeout waiting for routine to exit")
}
})
}
func TestVerifyKzgBatch(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("all valid data columns succeed", func(t *testing.T) {
dataColumns := createValidTestDataColumns(t, 3)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})
t.Run("invalid proofs fail entire batch", func(t *testing.T) {
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
allColumns := append(validColumns, invalidColumns...)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
assert.NotNil(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})
t.Run("empty batch handling", func(t *testing.T) {
verifyKzgBatch([]*kzgVerifier{})
})
}
func TestKzgBatchVerifierConcurrency(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
const numGoroutines = 10
const numRequestsPerGoroutine = 5
var wg sync.WaitGroup
wg.Add(numGoroutines)
// Multiple goroutines sending verification requests simultaneously
for i := range numGoroutines {
go func(goroutineID int) {
defer wg.Done()
for range numRequestsPerGoroutine {
dataColumns := createValidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
}
}(i)
}
wg.Wait()
}
func TestKzgBatchVerifierFallback(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
require.Equal(t, pubsub.ValidationReject, result)
assert.NotNil(t, err)
})
t.Run("empty data columns fallback", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
})
}
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.SecondsPerSlot = 0
params.OverrideBeaconConfig(cfg)
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.DeadlineExceeded)
done := make(chan struct{})
go func() {
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
close(done)
}()
select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier blocked")
}
}
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
cancelledCtx, cancel := context.WithCancel(t.Context())
cancel()
service := &Service{
ctx: context.Background(),
kzgChan: make(chan *kzgVerifier),
}
done := make(chan struct{})
go func() {
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.Canceled)
close(done)
}()
select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
}
select {
case <-service.kzgChan:
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
default:
}
}
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
if len(roSidecars) >= count {

View File

@@ -77,13 +77,8 @@ func SendBeaconBlocksByRangeRequest(
}
defer closeStream(stream, log)
// Cap the slice capacity to MaxRequestBlock to prevent panic from invalid Count values.
// This guards against upstream bugs that may produce astronomically large Count values
// (e.g., due to unsigned integer underflow).
sliceCap := min(req.Count, params.MaxRequestBlock(slots.ToEpoch(tor.CurrentSlot())))
// Augment block processing function, if non-nil block processor is provided.
blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, sliceCap)
blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, req.Count)
process := func(blk interfaces.ReadOnlySignedBeaconBlock) error {
blocks = append(blocks, blk)
if blockProcessor != nil {
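The comment removed above explains the defensive idea being reverted: never preallocate with a count that a remote peer or an upstream bug can inflate. A minimal sketch of that capping pattern (names hypothetical):

```go
package main

import "fmt"

// cappedPrealloc allocates capacity for at most maxAllowed items, so a corrupt
// or underflowed requested count cannot trigger an enormous allocation.
func cappedPrealloc[T any](requested, maxAllowed uint64) []T {
	return make([]T, 0, min(requested, maxAllowed))
}

func main() {
	blocks := cappedPrealloc[string](1<<62, 128) // hostile count, safe cap
	fmt.Println(cap(blocks))                     // 128
}
```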

View File

@@ -12,7 +12,6 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
dbTest "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
testingDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
@@ -935,7 +934,6 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: dbTest.SetupDB(t),
chain: chain,
stateNotifier: chain.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},

View File

@@ -17,7 +17,6 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
@@ -34,11 +33,9 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v7/crypto/rand"
"github.com/OffchainLabs/prysm/v7/runtime"
@@ -168,6 +165,7 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
@@ -208,7 +206,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
// Initialize KZG channel with fixed buffer size of 100.
// This buffer size is designed to handle burst traffic of data column gossip
// messages: data columns arrive far less frequently than attestations, whose
// channel is sized by the configured batchVerifierLimit (default 1000).
r.kzgChan = make(chan *kzgVerifier, 100)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
@@ -261,6 +262,7 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
go s.verifierRoutine()
go s.kzgVerifierRoutine()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()
@@ -273,6 +275,11 @@ func (s *Service) Start() {
s.processPendingBlocksQueue()
s.maintainPeerStatuses()
if params.FuluEnabled() {
s.maintainCustodyInfo()
}
s.resyncIfBehind()
// Update sync metrics.
@@ -280,15 +287,6 @@ func (s *Service) Start() {
// Prune data column cache periodically on finalization.
async.RunEvery(s.ctx, 30*time.Second, s.pruneDataColumnCache)
if !params.FuluEnabled() {
return
}
if err := s.maintainCustodyInfo(); err != nil {
log.WithError(err).Error("Failed to maintain custody info")
}
}
// Stop the regular sync service.
@@ -454,89 +452,6 @@ func (s *Service) waitForInitialSync(ctx context.Context) error {
}
}
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSupernode, err := s.cfg.beaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement
// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
}
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.beaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.beaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}
if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}
return earliestAvailableSlot, actualCustodyGroupCount, nil
}
func fuluForkSlot() (primitives.Slot, error) {
cfg := params.BeaconConfig()
fuluForkEpoch := cfg.FuluForkEpoch
if fuluForkEpoch == cfg.FarFutureEpoch {
return cfg.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
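The removed `updateCustodyInfoInDB` above derives a target custody group count from the node's flags. A condensed sketch of that selection, with the reconstruction minimum passed in as a parameter (in the real code it comes from `peerdas.MinimumCustodyGroupCountToReconstruct`):

```go
package main

import "fmt"

// targetCustodyGroups mirrors the selection above: a supernode custodies every
// group, a semi-supernode custodies at least the reconstruction minimum, and
// everyone else custodies the base requirement.
func targetCustodyGroups(requirement, allGroups, reconstructMin uint64, supernode, semiSupernode bool) uint64 {
	target := requirement
	if supernode {
		target = allGroups
	}
	if semiSupernode {
		target = max(requirement, reconstructMin)
	}
	return target
}

func main() {
	fmt.Println(targetCustodyGroups(4, 64, 32, false, false)) // 4
	fmt.Println(targetCustodyGroups(4, 64, 32, true, false))  // 64
	fmt.Println(targetCustodyGroups(4, 64, 32, false, true))  // 32
}
```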
// Checker defines a struct which can verify whether a node is currently
// synchronizing a chain with the rest of peers in the network.
type Checker interface {

View File

@@ -16,9 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -443,224 +440,3 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
require.Equal(t, false, util.WaitTimeout(&wg, 2*time.Second))
}
func TestUpdateCustodyInfoInDB(t *testing.T) {
const (
fuluForkEpoch = 10
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
pbBlock := util.NewBeaconBlock()
pbBlock.Block.Slot = 12
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
t.Run("CGC increases before fulu", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// After Fulu
// ----------
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("CGC increases after fulu", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
// After Fulu
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("Supernode downgrade prevented", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 32
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 32 due to downgrade prevention by UpdateCustodyInfo
})
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to all 64 custody groups
})
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
service := Service{cfg: &config{beaconDB: beaconDB}}
err = beaconDB.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Mocking a high custody requirement (simulating many validators) would mean
// overriding the custody requirement calculation: the high case applies when
// custodyRequirement exceeds the semi-supernode reconstruction minimum.
// Since custodyRequirement in this test is 4, we can't exercise the high case
// here; that would require a different test setup with actual validators.
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
// With low validator requirements (4), should use semi-supernode minimum (32)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}

View File

@@ -144,9 +144,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}
// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
if err := verifier.SidecarKzgProofVerified(); err != nil {
return pubsub.ValidationReject, err
validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
if validationResult != pubsub.ValidationAccept {
return validationResult, err
}
// Mark KZG verification as satisfied since we did it via batch verifier
verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
// with valid header signature, sidecar inclusion proof, and kzg proof.

View File

@@ -71,7 +71,10 @@ func TestValidateDataColumn(t *testing.T) {
ctx: ctx,
newColumnsVerifier: newDataColumnsVerifier,
seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
kzgChan: make(chan *kzgVerifier, 100),
}
// Start the KZG verifier routine for batch verification
go service.kzgVerifierRoutine()
// Encode a `beaconBlock` message instead of expected.
buf := new(bytes.Buffer)

View File

@@ -588,12 +588,6 @@ func fcReturnsTargetRoot(root [32]byte) func([32]byte, primitives.Epoch) ([32]by
}
}
func fcReturnsDependentRoot() func([32]byte, primitives.Epoch) ([32]byte, error) {
return func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return root, nil
}
}
type mockSignatureCache struct {
svCalledForSig map[signatureData]bool
svcb func(sig signatureData) (bool, error)

View File

@@ -1,6 +1,7 @@
package verification
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
@@ -18,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
@@ -293,57 +293,55 @@ func (dv *RODataColumnsVerifier) ValidProposerSignature(ctx context.Context) (er
// The returned state is guaranteed to be at the same epoch as the data column's epoch, and have the same randao mix and active
// validator indices as the data column's parent state advanced to the data column's slot.
func (dv *RODataColumnsVerifier) getVerifyingState(ctx context.Context, dataColumn blocks.RODataColumn) (state.ReadOnlyBeaconState, error) {
dataColumnSlot := dataColumn.Slot()
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch == 0 {
return dv.hsp.HeadStateReadOnly(ctx)
}
parentRoot := dataColumn.ParentRoot()
dcDependentRoot, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch-1)
if err != nil {
return nil, err
}
headRoot, err := dv.hsp.HeadRoot(ctx)
if err != nil {
return nil, err
}
headDependentRoot, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch-1)
if err != nil {
return nil, err
}
if dcDependentRoot == headDependentRoot {
headSlot := dv.hsp.HeadSlot()
headEpoch := slots.ToEpoch(headSlot)
if headEpoch == dataColumnEpoch || headEpoch == dataColumnEpoch-1 {
parentRoot := dataColumn.ParentRoot()
dataColumnSlot := dataColumn.Slot()
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
headSlot := dv.hsp.HeadSlot()
headEpoch := slots.ToEpoch(headSlot)
// Use head if it's the parent
if bytes.Equal(parentRoot[:], headRoot) {
// If they are in the same epoch, then we can return the head state directly
if dataColumnEpoch == headEpoch {
return dv.hsp.HeadStateReadOnly(ctx)
}
if headEpoch+1 < dataColumnEpoch {
headState, err := dv.hsp.HeadState(ctx)
if err != nil {
return nil, err
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
// Otherwise, we need to process the head state to the data column's slot
headState, err := dv.hsp.HeadState(ctx)
if err != nil {
return nil, err
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
}
// If head and data column are in the same epoch and head is compatible with the parent's dependent root, then use head
if dataColumnEpoch == headEpoch {
headDependent, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch)
if err != nil {
return nil, err
}
parentDependent, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return nil, err
}
if bytes.Equal(headDependent[:], parentDependent[:]) {
return dv.hsp.HeadStateReadOnly(ctx)
}
}
logrus.WithFields(logrus.Fields{
"slot": dataColumnSlot,
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"headRoot": fmt.Sprintf("%#x", headRoot),
}).Debug("Replying state for data column verification")
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
// Otherwise retrieve the parent state and advance it to the data column's slot
parentState, err := dv.sr.StateByRoot(ctx, parentRoot)
if err != nil {
return nil, err
}
targetState, err := dv.sr.StateByRoot(ctx, targetRoot)
if err != nil {
return nil, err
parentEpoch := slots.ToEpoch(parentState.Slot())
if dataColumnEpoch == parentEpoch {
return parentState, nil
}
targetEpoch := slots.ToEpoch(targetState.Slot())
if targetEpoch == dataColumnEpoch || targetEpoch == dataColumnEpoch-1 {
return targetState, nil
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, targetState, parentRoot[:], dataColumnSlot)
return transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], dataColumnSlot)
}
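Condensed, the rewritten `getVerifyingState` above picks one of three state sources; a minimal sketch of that decision, with booleans standing in for the root and epoch comparisons done in the real code:

```go
package main

import "fmt"

// chooseVerifyingState condenses the selection above: prefer the cheap
// read-only head state whenever it is provably equivalent for verifying the
// proposer, otherwise replay from the parent state.
func chooseVerifyingState(parentIsHead, sameEpochAsHead, dependentRootsMatch bool) string {
	if parentIsHead {
		if sameEpochAsHead {
			return "head (read-only)"
		}
		return "head, advanced to the column's slot"
	}
	if sameEpochAsHead && dependentRootsMatch {
		return "head (read-only)"
	}
	return "parent state (advanced to the column's slot if needed)"
}

func main() {
	fmt.Println(chooseVerifyingState(true, true, false))
	fmt.Println(chooseVerifyingState(false, true, true))
	fmt.Println(chooseVerifyingState(false, false, false))
}
```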
func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) (err error) {

View File

@@ -1,7 +1,6 @@
package verification
import (
"context"
"reflect"
"testing"
"time"
@@ -10,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -283,7 +281,7 @@ func TestColumnSlotAboveFinalized(t *testing.T) {
func TestValidProposerSignature(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -296,83 +294,59 @@ func TestValidProposerSignature(t *testing.T) {
// The signature data does not depend on the data column itself, so we can use the first one.
expectedSignatureData := columnToSignatureData(firstColumn)
// Create a proper Fulu state for verification.
// We need enough validators to cover the proposer index.
numValidators := max(uint64(firstColumn.ProposerIndex()+1), 64)
fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
// Head state provider that returns the fuluState via HeadStateReadOnly path.
headStateWithState := &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot,
headStateReadOnly: fuluState,
}
// Head state provider that will fail (headStateReadOnly is nil).
headStateNotFound := &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot,
}
testCases := []struct {
isError bool
vscbShouldError bool
svcbReturn bool
stateByRooter StateByRooter
headStateProvider *mockHeadStateProvider
vscbError error
svcbError error
name string
isError bool
vscbShouldError bool
svcbReturn bool
stateByRooter StateByRooter
vscbError error
svcbError error
name string
}{
{
name: "cache hit - success",
svcbReturn: true,
svcbError: nil,
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
headStateProvider: headStateWithState,
isError: false,
name: "cache hit - success",
svcbReturn: true,
svcbError: nil,
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
isError: false,
},
{
name: "cache hit - error",
svcbReturn: true,
svcbError: errors.New("derp"),
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
headStateProvider: headStateWithState,
isError: true,
name: "cache hit - error",
svcbReturn: true,
svcbError: errors.New("derp"),
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
isError: true,
},
{
name: "cache miss - success",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
headStateProvider: headStateWithState,
isError: false,
name: "cache miss - success",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
isError: false,
},
{
name: "cache miss - state not found",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrNotFound(t, expectedSignatureData.Parent),
headStateProvider: headStateNotFound,
isError: true,
name: "cache miss - state not found",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrNotFound(t, expectedSignatureData.Parent),
isError: true,
},
{
name: "cache miss - signature failure",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: errors.New("signature, not so good!"),
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
headStateProvider: headStateWithState,
isError: true,
name: "cache miss - signature failure",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: errors.New("signature, not so good!"),
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
isError: true,
},
}
@@ -403,10 +377,9 @@ func TestValidProposerSignature(t *testing.T) {
shared: &sharedResources{
sc: signatureCache,
sr: tc.stateByRooter,
hsp: tc.headStateProvider,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
DependentRootForEpochCB: fcReturnsDependentRoot(),
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
@@ -432,7 +405,7 @@ func TestValidProposerSignature(t *testing.T) {
func TestDataColumnsSidecarParentSeen(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -536,7 +509,7 @@ func TestDataColumnsSidecarParentValid(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -657,7 +630,7 @@ func TestDataColumnsSidecarDescendsFromFinalized(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -720,7 +693,7 @@ func TestDataColumnsSidecarInclusionProven(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -775,7 +748,7 @@ func TestDataColumnsSidecarKzgProofVerified(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 97
columnSlot = 0
blobCount = 1
)
@@ -951,135 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}
func TestGetVerifyingStateEdgeCases(t *testing.T) {
const (
columnSlot = 97 // epoch 3
blobCount = 1
)
parentRoot := [fieldparams.RootLength]byte{}
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
// Create a proper Fulu state for verification.
numValidators := max(uint64(columns[0].ProposerIndex()+1), 64)
fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
t.Run("different dependent roots - uses StateByRoot path", func(t *testing.T) {
// Parent and head are on different forks with different dependent roots.
// This forces the code to use TargetRootForEpoch -> StateByRoot path.
signatureCache := &mockSignatureCache{
svcb: func(signatureData signatureData) (bool, error) {
return false, nil // Cache miss
},
vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
return nil // Signature valid
},
}
// StateByRoot will be called because dependent roots differ
stateByRootCalled := false
stateByRooter := &mockStateByRooter{
sbr: func(_ context.Context, root [32]byte) (state.BeaconState, error) {
stateByRootCalled = true
return fuluState, nil
},
}
initializer := Initializer{
shared: &sharedResources{
sc: signatureCache,
sr: stateByRooter,
hsp: &mockHeadStateProvider{
headRoot: []byte{0xff}, // Different from parentRoot
headSlot: columnSlot,
},
fc: &mockForkchoicer{
// Return different roots for parent vs head to simulate different forks
DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return root, nil // Returns input, so parent [0...] != head [0xff...]
},
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.ValidProposerSignature(t.Context())
require.NoError(t, err)
require.Equal(t, true, stateByRootCalled, "StateByRoot should be called when dependent roots differ")
})
t.Run("same dependent root head far ahead - uses head state with ProcessSlots", func(t *testing.T) {
// Parent is ancestor of head on same chain, but head is in epoch 1 while column is in epoch 3.
// headEpoch (1) + 1 < dataColumnEpoch (3), so ProcessSlots is called on head state.
signatureCache := &mockSignatureCache{
svcb: func(signatureData signatureData) (bool, error) {
return false, nil // Cache miss
},
vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
return nil // Signature valid
},
}
headStateCalled := false
initializer := Initializer{
shared: &sharedResources{
sc: signatureCache,
sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:], // Same as parent
headSlot: 32, // Epoch 1
headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
headStateReadOnly: nil, // Should not use ReadOnly path
},
fc: &mockForkchoicer{
// Return same root for both to simulate same chain
DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return [32]byte{0xaa}, nil // Same for all inputs
},
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
// Wrap to detect HeadState call
originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
wrappedHsp := &mockHeadStateProvider{
headRoot: originalHsp.headRoot,
headSlot: originalHsp.headSlot,
headState: originalHsp.headState,
}
initializer.shared.hsp = &headStateCallTracker{
mockHeadStateProvider: wrappedHsp,
headStateCalled: &headStateCalled,
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.ValidProposerSignature(t.Context())
require.NoError(t, err)
require.Equal(t, true, headStateCalled, "HeadState should be called when head is far ahead")
})
}
// headStateCallTracker wraps mockHeadStateProvider to track HeadState calls.
type headStateCallTracker struct {
*mockHeadStateProvider
headStateCalled *bool
}
func (h *headStateCallTracker) HeadState(ctx context.Context) (state.BeaconState, error) {
*h.headStateCalled = true
return h.mockHeadStateProvider.HeadState(ctx)
}
func (h *headStateCallTracker) HeadRoot(ctx context.Context) ([]byte, error) {
return h.mockHeadStateProvider.HeadRoot(ctx)
}
func (h *headStateCallTracker) HeadSlot() primitives.Slot {
return h.mockHeadStateProvider.HeadSlot()
}
func (h *headStateCallTracker) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
return h.mockHeadStateProvider.HeadStateReadOnly(ctx)
}

View File

@@ -1,3 +0,0 @@
### Ignored
- Add `NewBeaconStateGloas()`.

View File

@@ -1,3 +0,0 @@
### Ignored
- Small touch-ups on the state diff code.

View File

@@ -1,2 +0,0 @@
### Fixed
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fix `prysmctl testnet generate-genesis` to use the timestamp from `--geth-genesis-json-in` when `--genesis-time` is not explicitly provided.
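Illustratively, the fix boils down to preferring an explicitly supplied genesis time and otherwise falling back to the geth genesis timestamp; a minimal sketch, where the function and parameter names are assumptions rather than the actual prysmctl wiring:

```go
package genesis

// chooseGenesisTime prefers an explicitly provided --genesis-time value
// and otherwise falls back to the timestamp embedded in the geth genesis
// JSON. Names are illustrative, not prysmctl's real implementation.
func chooseGenesisTime(flagGenesisTime, gethTimestamp uint64) uint64 {
	if flagGenesisTime != 0 { // flag explicitly provided
		return flagGenesisTime
	}
	return gethTimestamp // fall back to the --geth-genesis-json-in timestamp
}
```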

View File

@@ -1,3 +0,0 @@
### Ignored
- Removed deprecated gometalinter references from Bazel configuration.

View File

@@ -1,3 +0,0 @@
### Fixed
- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes.
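A minimal sketch of that kind of guard, assuming hypothetical `publicPrefixes` and `validateToken` helpers rather than Prysm's actual route table or auth code:

```go
package middleware

import (
	"net/http"
	"strings"
)

// publicPrefixes lists the routes that may be served without auth.
// These entries are illustrative placeholders.
var publicPrefixes = []string{"/v2/validator/health"}

func isPublicRoute(path string) bool {
	for _, p := range publicPrefixes {
		if strings.HasPrefix(path, p) {
			return true
		}
	}
	return false
}

// requireAuth rejects unauthenticated requests to every non-public route,
// rather than checking only a hand-picked set of endpoints.
func requireAuth(validateToken func(string) bool, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !isPublicRoute(r.URL.Path) {
			token := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
			if !validateToken(token) {
				http.Error(w, "unauthorized", http.StatusUnauthorized)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}
```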

View File

@@ -1,3 +0,0 @@
### Fixed
- Stop `SlotIntervalTicker` goroutine leaks [#16241](https://github.com/OffchainLabs/prysm/pull/16241)
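The usual shape of such a fix is to give the ticker goroutine an explicit shutdown path; a generic sketch, not the PR's actual code:

```go
package ticker

import (
	"context"
	"time"
)

// runSlotInterval invokes fn on every tick until ctx is cancelled. Without
// the ctx.Done() case, the goroutine would block on t.C forever once its
// consumer goes away -- the classic ticker goroutine leak.
func runSlotInterval(ctx context.Context, interval time.Duration, fn func(time.Time)) {
	t := time.NewTicker(interval)
	go func() {
		defer t.Stop()
		for {
			select {
			case tick := <-t.C:
				fn(tick)
			case <-ctx.Done():
				return
			}
		}
	}()
}
```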

View File

@@ -1,3 +0,0 @@
### Changed
- Post-Electra, we now call for attestation data once per slot and serve subsequent requests from a cache.
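Conceptually, this puts a small per-slot cache in front of the fetch; a minimal sketch, with `Slot`, `AttestationData`, and `fetch` standing in for Prysm's real types and call:

```go
package attcache

import "sync"

// Slot and AttestationData are stand-ins for Prysm's real types.
type Slot uint64
type AttestationData struct{ BeaconBlockRoot [32]byte }

// slotCache memoizes the first attestation-data fetch in a slot and serves
// later requests for the same slot from memory.
type slotCache struct {
	mu   sync.Mutex
	slot Slot
	data *AttestationData
}

func (c *slotCache) get(slot Slot, fetch func(Slot) (*AttestationData, error)) (*AttestationData, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.data != nil && c.slot == slot {
		return c.data, nil // cache hit: no extra call this slot
	}
	d, err := fetch(slot) // cache miss: the one real call for this slot
	if err != nil {
		return nil, err
	}
	c.slot, c.data = slot, d
	return d, nil
}
```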

View File

@@ -1,11 +0,0 @@
### Added
- Add basic Fulu fork transition support for mainnet and minimal e2e tests (the multi-scenario case is not included)
### Changed
- Updated go-ethereum to 1.16.7
### Removed
- Removed github.com/MariusVanDerWijden/FuzzyVM and github.com/MariusVanDerWijden/tx-fuzz due to lack of support post-1.16.7; they were only used in e2e for transaction fuzzing

View File

@@ -1,3 +0,0 @@
### Ignored
- Delayed the head evaluator check to mid-epoch for e2e.

View File

@@ -1,3 +0,0 @@
### Ignored
- Updated phase 0 constants for ethspecify

View File

@@ -1,3 +0,0 @@
### Fixed
- When adding the `--[semi-]supernode` flag, update the earliest available slot accordingly.

View File

@@ -1,3 +0,0 @@
### Removed
- Batching of KZG verification for data column sidecars incoming via gossip

View File

@@ -1,2 +0,0 @@
### Added
- `commitment_count_in_gossip_processed_blocks` gauge metric to track the number of blob KZG commitments in processed beacon blocks.
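With the standard Prometheus Go client, registering and updating a gauge of that name looks roughly like this; the help text and call site are assumptions:

```go
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// commitmentCount mirrors the metric named in the change note.
var commitmentCount = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "commitment_count_in_gossip_processed_blocks",
	Help: "Number of blob KZG commitments in processed beacon blocks.",
})

// recordCommitments would be called once per processed beacon block.
func recordCommitments(kzgCommitments [][]byte) {
	commitmentCount.Set(float64(len(kzgCommitments)))
}
```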

View File

@@ -1,2 +0,0 @@
### Removed
- `--disable-get-blobs-v2` flag from help.

Some files were not shown because too many files have changed in this diff.