Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 14:28:09 -05:00)

Compare commits: cache-atte... → develop (3 commits: 76420d9428, 21366e11ca, 7ed4d496dd)

WORKSPACE (10 lines changed)
@@ -273,16 +273,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.7.0-alpha.0"
+consensus_spec_version = "v1.7.0-alpha.1"

 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
-        "minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
-        "mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
+        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
+        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
+        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
     },
     version = consensus_spec_version,
 )

@@ -298,7 +298,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
+    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -26,7 +26,6 @@ go_library(
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/validators:go_default_library",
         "//beacon-chain/state:go_default_library",
-        "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types:go_default_library",

@@ -7,7 +7,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
-	"github.com/OffchainLabs/prysm/v7/config/features"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -213,12 +212,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
 		if err != nil {
 			return nil, errors.Wrap(err, "could not get next withdrawal validator index")
 		}
-		if features.Get().LowValcountSweep {
-			bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
-			nextValidatorIndex += primitives.ValidatorIndex(bound)
-		} else {
-			nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
-		}
+		nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
 		nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
 	} else {
 		nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
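The hunk above drops the `low-valcount-sweep` special case: the sweep cursor now always advances by MaxValidatorsPerWithdrawalsSweep and wraps modulo the validator count. A minimal, self-contained sketch of that index arithmetic with plain integers (illustrative only, hypothetical values, not Prysm's actual types):

package main

import "fmt"

// advanceSweepCursor mirrors the post-change logic: advance the next-withdrawal
// validator index by the sweep bound, then wrap around the validator registry.
func advanceSweepCursor(next, sweepBound, numValidators uint64) uint64 {
	return (next + sweepBound) % numValidators
}

func main() {
	// Hypothetical numbers: 1,000 validators and a mainnet-style sweep bound of 16,384.
	// When there are fewer validators than the bound, the cursor simply wraps around.
	fmt.Println(advanceSweepCursor(900, 16384, 1000)) // 284
}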
@@ -12,7 +12,6 @@ go_library(
         "log.go",
         "registry_updates.go",
         "transition.go",
         "transition_no_verify_sig.go",
         "upgrade.go",
         "validator.go",
         "withdrawals.go",

@@ -62,7 +61,6 @@ go_test(
         "error_test.go",
         "export_test.go",
         "registry_updates_test.go",
         "transition_no_verify_sig_test.go",
         "transition_test.go",
         "upgrade_test.go",
         "validator_test.go",
@@ -6,6 +6,11 @@ type execReqErr struct {
 	error
 }

+// NewExecReqError creates a new execReqErr.
+func NewExecReqError(msg string) error {
+	return execReqErr{errors.New(msg)}
+}
+
 // IsExecutionRequestError returns true if the error has `execReqErr`.
 func IsExecutionRequestError(e error) bool {
 	if e == nil {
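The diff exports NewExecReqError so that the transition package can tag execution-request failures, while IsExecutionRequestError (its body is truncated in this capture) lets callers test for that classification. A hedged sketch of the same wrapper-type idiom with hypothetical names (not Prysm's actual implementation), showing how errors.As recovers the marker type:

package main

import (
	"errors"
	"fmt"
)

// tagErr is a hypothetical marker type that wraps an underlying error,
// mirroring the execReqErr pattern from the diff.
type tagErr struct{ error }

// newTagError wraps a message in the marker type, like NewExecReqError.
func newTagError(msg string) error { return tagErr{errors.New(msg)} }

// isTagError reports whether err (or anything it wraps) carries the marker.
func isTagError(err error) bool {
	if err == nil {
		return false
	}
	var t tagErr
	return errors.As(err, &t)
}

func main() {
	err := fmt.Errorf("processing block: %w", newTagError("nil deposit request"))
	fmt.Println(isTagError(err)) // true
}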
@@ -1,60 +0,0 @@
package electra_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessOperationsWithNilRequests(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
		errMsg    string
	}{
		{
			name: "Nil deposit request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
			},
			errMsg: "nil deposit request",
		},
		{
			name: "Nil withdrawal request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
			},
			errMsg: "nil withdrawal request",
		},
		{
			name: "Nil consolidation request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
			},
			errMsg: "nil consolidation request",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(1))

			_, err = electra.ProcessOperations(t.Context(), st, b.Block())
			require.ErrorContains(t, tc.errMsg, err)
		})
	}
}
@@ -3,6 +3,8 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
+        "electra.go",
+        "errors.go",
         "log.go",
         "skip_slot_cache.go",
         "state.go",

@@ -62,6 +64,8 @@ go_test(
         "altair_transition_no_verify_sig_test.go",
         "bellatrix_transition_no_verify_sig_test.go",
         "benchmarks_test.go",
+        "electra_test.go",
+        "exports_test.go",
         "skip_slot_cache_test.go",
         "state_fuzz_test.go",
         "state_test.go",
@@ -1,9 +1,10 @@
-package electra
+package transition

 import (
 	"context"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"

@@ -47,7 +48,7 @@ var (
 // # [New in Electra:EIP7251]
 // for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

-func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
+func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
 	var err error

 	// 6110 validations are in VerifyOperationLengths
@@ -63,59 +64,60 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
 			return nil, errors.Wrap(err, "could not update total active balance cache")
 		}
 	}
-	st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
+	st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair proposer slashing")
+		return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
 	}
-	st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
+	st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair attester slashing")
+		return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
 	}
-	st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
+	st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair attestation")
+		return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
 	}
-	if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
-		return nil, errors.Wrap(err, "could not process altair deposit")
+	if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
+		return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
 	}
-	st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
+	st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process voluntary exits")
+		return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
 	}
-	st, err = ProcessBLSToExecutionChanges(st, block)
+	st, err = blocks.ProcessBLSToExecutionChanges(st, block)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process bls-to-execution changes")
+		return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
 	}
 	// new in electra
 	requests, err := bb.ExecutionRequests()
 	if err != nil {
-		return nil, errors.Wrap(err, "could not get execution requests")
+		return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
 	}
 	for _, d := range requests.Deposits {
 		if d == nil {
-			return nil, errors.New("nil deposit request")
+			return nil, electra.NewExecReqError("nil deposit request")
 		}
 	}
-	st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
+	st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
 	if err != nil {
-		return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
+		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
 	}

 	for _, w := range requests.Withdrawals {
 		if w == nil {
-			return nil, errors.New("nil withdrawal request")
+			return nil, electra.NewExecReqError("nil withdrawal request")
 		}
 	}
-	st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
+	st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
 	if err != nil {
-		return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
+		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
 	}
 	for _, c := range requests.Consolidations {
 		if c == nil {
-			return nil, errors.New("nil consolidation request")
+			return nil, electra.NewExecReqError("nil consolidation request")
 		}
 	}
-	if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
-		return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
+	if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
+		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
 	}
 	return st, nil
 }
beacon-chain/core/transition/electra_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
package transition_test

import (
	"context"
	"errors"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessOperationsWithNilRequests(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
		errMsg    string
	}{
		{
			name: "Nil deposit request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
			},
			errMsg: "nil deposit request",
		},
		{
			name: "Nil withdrawal request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
			},
			errMsg: "nil withdrawal request",
		},
		{
			name: "Nil consolidation request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
			},
			errMsg: "nil consolidation request",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(1))

			_, err = transition.ElectraOperations(t.Context(), st, b.Block())
			require.ErrorContains(t, tc.errMsg, err)
		})
	}
}

func TestElectraOperations_ProcessingErrors(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
		errCheck  func(t *testing.T, err error)
	}{
		{
			name: "ErrProcessProposerSlashingsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create invalid proposer slashing with out-of-bounds proposer index
				blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
					{
						Header_1: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999, // Invalid index (out of bounds)
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
						Header_2: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999,
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process proposer slashings failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
			},
		},
		{
			name: "ErrProcessAttestationsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create attestation with invalid committee index
				blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
					{
						AggregationBits: []byte{0b00000001},
						Data: &ethpb.AttestationData{
							Slot:            1,
							CommitteeIndex:  999999, // Invalid committee index
							BeaconBlockRoot: make([]byte, 32),
							Source: &ethpb.Checkpoint{
								Epoch: 0,
								Root:  make([]byte, 32),
							},
							Target: &ethpb.Checkpoint{
								Epoch: 0,
								Root:  make([]byte, 32),
							},
						},
						CommitteeBits: []byte{0b00000001},
						Signature:     make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process attestations failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
			},
		},
		{
			name: "ErrProcessDepositsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create deposit with invalid proof length
				blk.Block.Body.Deposits = []*ethpb.Deposit{
					{
						Proof: [][]byte{}, // Invalid: empty proof
						Data: &ethpb.Deposit_Data{
							PublicKey:             make([]byte, 48),
							WithdrawalCredentials: make([]byte, 32),
							Amount:                32000000000, // 32 ETH in Gwei
							Signature:             make([]byte, 96),
						},
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process deposits failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
			},
		},
		{
			name: "ErrProcessVoluntaryExitsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create voluntary exit with invalid validator index
				blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
					{
						Exit: &ethpb.VoluntaryExit{
							Epoch:          0,
							ValidatorIndex: 999999, // Invalid index (out of bounds)
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process voluntary exits failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
			},
		},
		{
			name: "ErrProcessBLSChangesFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create BLS to execution change with invalid validator index
				blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
					{
						Message: &ethpb.BLSToExecutionChange{
							ValidatorIndex:     999999, // Invalid index (out of bounds)
							FromBlsPubkey:      make([]byte, 48),
							ToExecutionAddress: make([]byte, 20),
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process BLS to execution changes failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(primitives.Slot(1)))

			_, err = transition.ElectraOperations(ctx, st, b.Block())
			require.NotNil(t, err, "Expected an error but got nil")
			tc.errCheck(t, err)
		})
	}
}
beacon-chain/core/transition/errors.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package transition

import "errors"

var (
	ErrAttestationsSignatureInvalid          = errors.New("attestations signature invalid")
	ErrRandaoSignatureInvalid                = errors.New("randao signature invalid")
	ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
	ErrProcessWithdrawalsFailed              = errors.New("process withdrawals failed")
	ErrProcessRandaoFailed                   = errors.New("process randao failed")
	ErrProcessEth1DataFailed                 = errors.New("process eth1 data failed")
	ErrProcessProposerSlashingsFailed        = errors.New("process proposer slashings failed")
	ErrProcessAttesterSlashingsFailed        = errors.New("process attester slashings failed")
	ErrProcessAttestationsFailed             = errors.New("process attestations failed")
	ErrProcessDepositsFailed                 = errors.New("process deposits failed")
	ErrProcessVoluntaryExitsFailed           = errors.New("process voluntary exits failed")
	ErrProcessBLSChangesFailed               = errors.New("process BLS to execution changes failed")
	ErrProcessSyncAggregateFailed            = errors.New("process sync aggregate failed")
)
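Throughout this change, call sites wrap these sentinels as errors.Wrap(ErrX, err.Error()), which keeps the sentinel in the error chain so callers (such as the proposer's handleStateRootError further down) can branch on errors.Is while still carrying the underlying message. A small standalone sketch of that pattern with a hypothetical sentinel, assuming the github.com/pkg/errors package (which the surrounding code appears to use) at a version that supports Unwrap:

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

// errStepFailed is a hypothetical sentinel, analogous to ErrProcessDepositsFailed.
var errStepFailed = errors.New("step failed")

func runStep() error {
	cause := errors.New("empty proof")
	// Wrap the sentinel with the cause's message; errors.Is still matches the sentinel.
	return pkgerrors.Wrap(errStepFailed, cause.Error())
}

func main() {
	err := runStep()
	fmt.Println(err)                           // "empty proof: step failed"
	fmt.Println(errors.Is(err, errStepFailed)) // true
}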
beacon-chain/core/transition/exports_test.go (new file, 3 lines)
@@ -0,0 +1,3 @@
package transition

var ElectraOperations = electraOperations
@@ -7,12 +7,11 @@ import (

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
 	b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
-	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -70,10 +69,11 @@ func ExecuteStateTransitionNoVerifyAnySig(
 	}

 	// Execute per block transition.
-	set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
+	sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "could not process block")
 	}
+	set := sigSlice.Batch()

 	// State root validation.
 	postStateRoot, err := st.HashTreeRoot(ctx)

@@ -113,7 +113,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
 // assert block.state_root == hash_tree_root(state)
 func CalculateStateRoot(
 	ctx context.Context,
-	state state.BeaconState,
+	rollback state.BeaconState,
 	signed interfaces.ReadOnlySignedBeaconBlock,
 ) ([32]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")

@@ -122,7 +122,7 @@ func CalculateStateRoot(
 		tracing.AnnotateError(span, ctx.Err())
 		return [32]byte{}, ctx.Err()
 	}
-	if state == nil || state.IsNil() {
+	if rollback == nil || rollback.IsNil() {
 		return [32]byte{}, errors.New("nil state")
 	}
 	if signed == nil || signed.IsNil() || signed.Block().IsNil() {

@@ -130,7 +130,7 @@ func CalculateStateRoot(
 	}

 	// Copy state to avoid mutating the state reference.
-	state = state.Copy()
+	state := rollback.Copy()

 	// Execute per slots transition.
 	var err error

@@ -141,12 +141,101 @@ func CalculateStateRoot(
 	}

 	// Execute per block transition.
-	state, err = ProcessBlockForStateRoot(ctx, state, signed)
-	if err != nil {
-		return [32]byte{}, errors.Wrap(err, "could not process block")
-	}
-
-	return state.HashTreeRoot(ctx)
+	if features.Get().EnableProposerPreprocessing {
+		state, err = processBlockForProposing(ctx, state, signed)
+		if err != nil {
+			return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
+		}
+	} else {
+		state, err = ProcessBlockForStateRoot(ctx, state, signed)
+		if err != nil {
+			return [32]byte{}, errors.Wrap(err, "could not process block")
+		}
+	}
+	return state.HashTreeRoot(ctx)
 }

+// processBlockVerifySigs processes the block and verifies the signatures within it. Block signatures are not verified as this block is not yet signed.
+func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
+	var err error
+	var set BlockSignatureBatches
+	set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
+	if err != nil {
+		return nil, err
+	}
+	// We first try to verify all sigantures batched optimistically. We ignore block proposer signature.
+	sigSet := set.Batch()
+	valid, err := sigSet.Verify()
+	if err != nil || valid {
+		return st, err
+	}
+	// Some signature failed to verify.
+	// Verify Attestations signatures
+	attSigs := set.AttestationSignatures
+	if attSigs == nil {
+		return nil, ErrAttestationsSignatureInvalid
+	}
+	valid, err = attSigs.Verify()
+	if err != nil {
+		return nil, err
+	}
+	if !valid {
+		return nil, ErrAttestationsSignatureInvalid
+	}
+
+	// Verify Randao signature
+	randaoSigs := set.RandaoSignatures
+	if randaoSigs == nil {
+		return nil, ErrRandaoSignatureInvalid
+	}
+	valid, err = randaoSigs.Verify()
+	if err != nil {
+		return nil, err
+	}
+	if !valid {
+		return nil, ErrRandaoSignatureInvalid
+	}
+
+	if signed.Block().Version() < version.Capella {
+		//This should not happen as we must have failed one of the above signatures.
+		return st, nil
+	}
+	// Verify BLS to execution changes signatures
+	blsChangeSigs := set.BLSChangeSignatures
+	if blsChangeSigs == nil {
+		return nil, ErrBLSToExecutionChangesSignatureInvalid
+	}
+	valid, err = blsChangeSigs.Verify()
+	if err != nil {
+		return nil, err
+	}
+	if !valid {
+		return nil, ErrBLSToExecutionChangesSignatureInvalid
+	}
+	// We should not reach this point as one of the above signatures must have failed.
+	return st, nil
+}
+
+// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
+type BlockSignatureBatches struct {
+	RandaoSignatures      *bls.SignatureBatch
+	AttestationSignatures *bls.SignatureBatch
+	BLSChangeSignatures   *bls.SignatureBatch
+}
+
+// Batch returns the batch of signature batches in the BlockSignatureBatches.
+func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
+	sigs := bls.NewSet()
+	if b.RandaoSignatures != nil {
+		sigs.Join(b.RandaoSignatures)
+	}
+	if b.AttestationSignatures != nil {
+		sigs.Join(b.AttestationSignatures)
+	}
+	if b.BLSChangeSignatures != nil {
+		sigs.Join(b.BLSChangeSignatures)
+	}
+	return sigs
+}

 // ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation

@@ -165,48 +254,48 @@ func ProcessBlockNoVerifyAnySig(
 	ctx context.Context,
 	st state.BeaconState,
 	signed interfaces.ReadOnlySignedBeaconBlock,
-) (*bls.SignatureBatch, state.BeaconState, error) {
+) (BlockSignatureBatches, state.BeaconState, error) {
 	ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
 	defer span.End()
+	set := BlockSignatureBatches{}
 	if err := blocks.BeaconBlockIsNil(signed); err != nil {
-		return nil, nil, err
+		return set, nil, err
 	}

 	if st.Version() != signed.Block().Version() {
-		return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
+		return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
 	}

 	blk := signed.Block()
 	st, err := ProcessBlockForStateRoot(ctx, st, signed)
 	if err != nil {
-		return nil, nil, err
+		return set, nil, err
 	}

 	randaoReveal := signed.Block().Body().RandaoReveal()
 	rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
 	if err != nil {
 		tracing.AnnotateError(span, err)
-		return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
+		return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
 	}
+	set.RandaoSignatures = rSet
 	aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
+		return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
 	}
+	set.AttestationSignatures = aSet

-	// Merge beacon block, randao and attestations signatures into a set.
-	set := bls.NewSet()
-	set.Join(rSet).Join(aSet)

 	if blk.Version() >= version.Capella {
 		changes, err := signed.Block().Body().BLSToExecutionChanges()
 		if err != nil {
-			return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
+			return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
 		}
 		cSet, err := b.BLSChangesSignatureBatch(st, changes)
 		if err != nil {
-			return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
+			return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
 		}
-		set.Join(cSet)
+		set.BLSChangeSignatures = cSet
 	}
 	return set, st, nil
 }
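For callers, the change above means ProcessBlockNoVerifyAnySig now returns BlockSignatureBatches instead of a single *bls.SignatureBatch; existing call sites fold the batches back together via Batch() before verifying, as the updated tests further down do. A hedged usage sketch (the state and signed block are assumed to come from elsewhere, e.g. the test utilities used in this diff; this is illustrative, not Prysm's actual caller):

package example

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
)

// verifyBlockSignatures shows the intended calling pattern after the API change:
// collect per-category signature batches, then verify them as one joined batch.
func verifyBlockSignatures(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	set, post, err := transition.ProcessBlockNoVerifyAnySig(ctx, st, signed)
	if err != nil {
		return nil, err
	}
	ok, err := set.Batch().Verify()
	if err != nil {
		return nil, err
	}
	if !ok {
		// A per-category fallback (randao, attestations, BLS changes) could
		// pinpoint the failing group, as processBlockForProposing does above.
		return nil, fmt.Errorf("batched signature verification failed")
	}
	return post, nil
}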
@@ -268,7 +357,7 @@ func ProcessOperationsNoVerifyAttsSigs(
 			return nil, err
 		}
 	} else {
-		state, err = electra.ProcessOperations(ctx, state, beaconBlock)
+		state, err = electraOperations(ctx, state, beaconBlock)
 		if err != nil {
 			return nil, err
 		}

@@ -326,7 +415,7 @@ func ProcessBlockForStateRoot(
 	if state.Version() >= version.Capella {
 		state, err = b.ProcessWithdrawals(state, executionData)
 		if err != nil {
-			return nil, errors.Wrap(err, "could not process withdrawals")
+			return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
 		}
 	}
 	if err = b.ProcessPayload(state, blk.Body()); err != nil {

@@ -338,13 +427,13 @@ func ProcessBlockForStateRoot(
 	state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
 	if err != nil {
 		tracing.AnnotateError(span, err)
-		return nil, errors.Wrap(err, "could not verify and process randao")
+		return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
 	}

 	state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
 	if err != nil {
 		tracing.AnnotateError(span, err)
-		return nil, errors.Wrap(err, "could not process eth1 data")
+		return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
 	}

 	state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())

@@ -363,7 +452,7 @@ func ProcessBlockForStateRoot(
 	}
 	state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
 	if err != nil {
-		return nil, errors.Wrap(err, "process_sync_aggregate failed")
+		return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
 	}

 	return state, nil
@@ -379,31 +468,35 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
 	exitInfo := &validators.ExitInfo{}
 	if hasSlashings || hasExits {
 		// ExitInformation is expensive to compute, only do it if we need it.
-		exitInfo = v.ExitInformation(st)
+		exitInfo = validators.ExitInformation(st)
 		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
 			return nil, errors.Wrap(err, "could not update total active balance cache")
 		}
 	}
 	st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair proposer slashing")
+		return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
 	}
 	st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair attester slashing")
+		return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
 	}
 	st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process altair attestation")
+		return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
 	}
 	if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
-		return nil, errors.Wrap(err, "could not process altair deposit")
+		return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
 	}
 	st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process voluntary exits")
+		return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
 	}
-	return b.ProcessBLSToExecutionChanges(st, beaconBlock)
+	st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
+	if err != nil {
+		return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
+	}
+	return st, nil
 }

 // This calls phase 0 block operations.

@@ -411,32 +504,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
 	var err error
 	hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
 	hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
-	var exitInfo *v.ExitInfo
+	var exitInfo *validators.ExitInfo
 	if hasSlashings || hasExits {
 		// ExitInformation is expensive to compute, only do it if we need it.
-		exitInfo = v.ExitInformation(st)
+		exitInfo = validators.ExitInformation(st)
 		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
 			return nil, errors.Wrap(err, "could not update total active balance cache")
 		}
 	}
 	st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process block proposer slashings")
+		return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
 	}
 	st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process block attester slashings")
+		return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
 	}
 	st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process block attestations")
+		return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
 	}
 	if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
-		return nil, errors.Wrap(err, "could not process deposits")
+		return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
 	}
 	st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not process voluntary exits")
+		return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
 	}
 	return st, nil
 }
@@ -132,7 +132,8 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
 	set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
 	require.NoError(t, err)
 	// Test Signature set verifies.
-	verified, err := set.Verify()
+	sigSet := set.Batch()
+	verified, err := sigSet.Verify()
 	require.NoError(t, err)
 	assert.Equal(t, true, verified, "Could not verify signature set.")
 }

@@ -145,7 +146,8 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
 	require.NoError(t, err)
 	set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
 	require.NoError(t, err)
-	verified, err := set.Verify()
+	sigSet := set.Batch()
+	verified, err := sigSet.Verify()
 	require.NoError(t, err)
 	require.Equal(t, true, verified, "Could not verify signature set")
 }

@@ -154,8 +156,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
 	beaconState, block, _, _, _ := createFullBlockWithOperations(t)
 	wsb, err := blocks.NewSignedBeaconBlock(block)
 	require.NoError(t, err)
-	set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
+	signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
 	require.NoError(t, err)
+	set := signatures.Batch()
 	assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
 	assert.Equal(t, "randao signature", set.Descriptions[0])
 	assert.Equal(t, "attestation signature", set.Descriptions[1])
@@ -602,7 +602,7 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe

 // computeStateRoot computes the state root after a block has been processed through a state transition and
 // returns it to the validator client.
-func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
+func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
 	beaconState, err := vs.StateGen.StateByRoot(ctx, block.Block().ParentRoot())
 	if err != nil {
 		return nil, errors.Wrap(err, "could not retrieve beacon state")

@@ -613,13 +613,72 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnl
 		block,
 	)
 	if err != nil {
-		return nil, errors.Wrapf(err, "could not calculate state root at slot %d", beaconState.Slot())
+		return vs.handleStateRootError(ctx, block, err)
 	}

 	log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
 	return root[:], nil
 }

+type computeStateRootAttemptsKeyType string
+
+const computeStateRootAttemptsKey = computeStateRootAttemptsKeyType("compute-state-root-attempts")
+const maxComputeStateRootAttempts = 3
+
+// handleStateRootError retries block construction in some error cases.
+func (vs *Server) handleStateRootError(ctx context.Context, block interfaces.SignedBeaconBlock, err error) ([]byte, error) {
+	if ctx.Err() != nil {
+		return nil, status.Errorf(codes.Canceled, "context error: %v", ctx.Err())
+	}
+	switch {
+	case errors.Is(err, transition.ErrAttestationsSignatureInvalid),
+		errors.Is(err, transition.ErrProcessAttestationsFailed):
+		log.WithError(err).Warn("Retrying block construction without attestations")
+		if err := block.SetAttestations([]ethpb.Att{}); err != nil {
+			return nil, errors.Wrap(err, "could not set attestations")
+		}
+	case errors.Is(err, transition.ErrProcessBLSChangesFailed), errors.Is(err, transition.ErrBLSToExecutionChangesSignatureInvalid):
+		log.WithError(err).Warn("Retrying block construction without BLS to execution changes")
+		if err := block.SetBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{}); err != nil {
+			return nil, errors.Wrap(err, "could not set BLS to execution changes")
+		}
+	case errors.Is(err, transition.ErrProcessProposerSlashingsFailed):
+		log.WithError(err).Warn("Retrying block construction without proposer slashings")
+		block.SetProposerSlashings([]*ethpb.ProposerSlashing{})
+	case errors.Is(err, transition.ErrProcessAttesterSlashingsFailed):
+		log.WithError(err).Warn("Retrying block construction without attester slashings")
+		if err := block.SetAttesterSlashings([]ethpb.AttSlashing{}); err != nil {
+			return nil, errors.Wrap(err, "could not set attester slashings")
+		}
+	case errors.Is(err, transition.ErrProcessVoluntaryExitsFailed):
+		log.WithError(err).Warn("Retrying block construction without voluntary exits")
+		block.SetVoluntaryExits([]*ethpb.SignedVoluntaryExit{})
+	case errors.Is(err, transition.ErrProcessSyncAggregateFailed):
+		log.WithError(err).Warn("Retrying block construction without sync aggregate")
+		emptySig := [96]byte{0xC0}
+		emptyAggregate := &ethpb.SyncAggregate{
+			SyncCommitteeBits:      make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
+			SyncCommitteeSignature: emptySig[:],
+		}
+		if err := block.SetSyncAggregate(emptyAggregate); err != nil {
+			log.WithError(err).Error("Could not set sync aggregate")
+		}
+
+	default:
+		return nil, errors.Wrap(err, "could not compute state root")
+	}
+	// prevent deep recursion by limiting max attempts.
+	if v, ok := ctx.Value(computeStateRootAttemptsKey).(int); !ok {
+		ctx = context.WithValue(ctx, computeStateRootAttemptsKey, int(1))
+	} else if v >= maxComputeStateRootAttempts {
+		return nil, fmt.Errorf("attempted max compute state root attempts %d", maxComputeStateRootAttempts)
+	} else {
+		ctx = context.WithValue(ctx, computeStateRootAttemptsKey, v+1)
+	}
+	// recursive call to compute state root again
+	return vs.computeStateRoot(ctx, block)
+}

 // Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
 //
 // SubmitValidatorRegistrations submits validator registrations.
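The retry path above strips the offending operation category from the block and recurses into computeStateRoot, carrying an attempt counter in the request context so a persistently failing block cannot recurse forever. A minimal, self-contained sketch of that context-counter pattern (hypothetical names, not the Prysm server):

package main

import (
	"context"
	"errors"
	"fmt"
)

type attemptsKey struct{}

const maxAttempts = 3

// retryWithBudget re-invokes op after a recoverable failure, tracking the
// attempt count in the context the same way handleStateRootError does.
func retryWithBudget(ctx context.Context, op func(context.Context) error) error {
	err := op(ctx)
	if err == nil {
		return nil
	}
	attempts, _ := ctx.Value(attemptsKey{}).(int)
	if attempts >= maxAttempts {
		return fmt.Errorf("gave up after %d attempts: %w", maxAttempts, err)
	}
	return retryWithBudget(context.WithValue(ctx, attemptsKey{}, attempts+1), op)
}

func main() {
	failing := func(context.Context) error { return errors.New("transient failure") }
	fmt.Println(retryWithBudget(context.Background(), failing))
}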
@@ -1313,6 +1313,59 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
 	require.NoError(t, err)
 }

+func TestHandleStateRootError_MaxAttemptsReached(t *testing.T) {
+	// Test that handleStateRootError returns an error when max attempts is reached
+	// instead of recursing infinitely.
+	ctx := t.Context()
+	vs := &Server{}
+
+	// Create a minimal block for testing
+	blk := util.NewBeaconBlock()
+	wsb, err := blocks.NewSignedBeaconBlock(blk)
+	require.NoError(t, err)
+
+	// Pre-seed the context with max attempts already reached
+	ctx = context.WithValue(ctx, computeStateRootAttemptsKey, maxComputeStateRootAttempts)
+
+	// Call handleStateRootError with a retryable error
+	_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
+
+	// Should return an error about max attempts instead of recursing
+	require.ErrorContains(t, "attempted max compute state root attempts", err)
+}
+
+func TestHandleStateRootError_IncrementsAttempts(t *testing.T) {
+	// Test that handleStateRootError properly increments the attempts counter
+	// and eventually fails after max attempts.
+	db := dbutil.SetupDB(t)
+	ctx := t.Context()
+
+	beaconState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100)
+
+	stateGen := stategen.New(db, doublylinkedtree.New())
+	vs := &Server{
+		StateGen: stateGen,
+	}
+
+	// Create a block that will trigger retries
+	blk := util.NewBeaconBlock()
+	blk.Block.ParentRoot = parentRoot[:]
+	blk.Block.Slot = 1
+	wsb, err := blocks.NewSignedBeaconBlock(blk)
+	require.NoError(t, err)
+
+	// Add a state for the parent root so StateByRoot succeeds
+	require.NoError(t, stateGen.SaveState(ctx, parentRoot, beaconState))
+
+	// Call handleStateRootError with a retryable error - it will recurse
+	// but eventually hit the max attempts limit since CalculateStateRoot
+	// will keep failing (no valid attestations, randao, etc.)
+	_, err = vs.handleStateRootError(ctx, wsb, transition.ErrAttestationsSignatureInvalid)
+
+	// Should eventually fail - either with max attempts or another error
+	require.NotNil(t, err)
+}
+
 func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) {
 	ctx := t.Context()
@@ -11,6 +11,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//beacon-chain/state/state-native/custom-types:go_default_library",
+        "//beacon-chain/state/state-native/types:go_default_library",
         "//config/fieldparams:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",

@@ -10,6 +10,7 @@ import (

 	"github.com/OffchainLabs/go-bitfield"
 	customtypes "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/custom-types"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

@@ -43,6 +44,8 @@ type Prover interface {
 	FinalizedRootProof(ctx context.Context) ([][]byte, error)
 	CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error)
 	NextSyncCommitteeProof(ctx context.Context) ([][]byte, error)
+
+	ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error)
 }

 // ReadOnlyBeaconState defines a struct which only has read access to beacon state methods.
@@ -5,6 +5,7 @@ import (
 	"encoding/binary"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
+	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/container/trie"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -39,42 +40,12 @@ func (b *BeaconState) NextSyncCommitteeGeneralizedIndex() (uint64, error) {

 // CurrentSyncCommitteeProof from the state's Merkle trie representation.
 func (b *BeaconState) CurrentSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.version == version.Phase0 {
-		return nil, errNotSupported("CurrentSyncCommitteeProof", b.version)
-	}
-
-	// In case the Merkle layers of the trie are not populated, we need
-	// to perform some initialization.
-	if err := b.initializeMerkleLayers(ctx); err != nil {
-		return nil, err
-	}
-	// Our beacon state uses a "dirty" fields pattern which requires us to
-	// recompute branches of the Merkle layers that are marked as dirty.
-	if err := b.recomputeDirtyFields(ctx); err != nil {
-		return nil, err
-	}
-	return trie.ProofFromMerkleLayers(b.merkleLayers, types.CurrentSyncCommittee.RealPosition()), nil
+	return b.ProofByFieldIndex(ctx, types.CurrentSyncCommittee)
 }

 // NextSyncCommitteeProof from the state's Merkle trie representation.
 func (b *BeaconState) NextSyncCommitteeProof(ctx context.Context) ([][]byte, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.version == version.Phase0 {
-		return nil, errNotSupported("NextSyncCommitteeProof", b.version)
-	}
-
-	if err := b.initializeMerkleLayers(ctx); err != nil {
-		return nil, err
-	}
-	if err := b.recomputeDirtyFields(ctx); err != nil {
-		return nil, err
-	}
-	return trie.ProofFromMerkleLayers(b.merkleLayers, types.NextSyncCommittee.RealPosition()), nil
+	return b.ProofByFieldIndex(ctx, types.NextSyncCommittee)
 }

 // FinalizedRootProof crafts a Merkle proof for the finalized root
@@ -83,8 +54,37 @@ func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error)
 	b.lock.Lock()
 	defer b.lock.Unlock()

-	if b.version == version.Phase0 {
-		return nil, errNotSupported("FinalizedRootProof", b.version)
+	branchProof, err := b.proofByFieldIndex(ctx, types.FinalizedCheckpoint)
+	if err != nil {
+		return nil, err
 	}

+	// The epoch field of a finalized checkpoint is the neighbor
+	// index of the finalized root field in its Merkle tree representation
+	// of the checkpoint. This neighbor is the first element added to the proof.
+	epochBuf := make([]byte, 8)
+	binary.LittleEndian.PutUint64(epochBuf, uint64(b.finalizedCheckpointVal().Epoch))
+	epochRoot := bytesutil.ToBytes32(epochBuf)
+	proof := make([][]byte, 0)
+	proof = append(proof, epochRoot[:])
+	proof = append(proof, branchProof...)
+	return proof, nil
+}
+
+// ProofByFieldIndex constructs proofs for given field index with lock acquisition.
+func (b *BeaconState) ProofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.proofByFieldIndex(ctx, f)
+}
+
+// proofByFieldIndex constructs proofs for given field index.
+// Important: it is assumed that beacon state mutex is locked when calling this method.
+func (b *BeaconState) proofByFieldIndex(ctx context.Context, f types.FieldIndex) ([][]byte, error) {
+	err := b.validateFieldIndex(f)
+	if err != nil {
+		return nil, err
+	}

 	if err := b.initializeMerkleLayers(ctx); err != nil {

@@ -93,16 +93,40 @@ func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error)
 	if err := b.recomputeDirtyFields(ctx); err != nil {
 		return nil, err
 	}
-	cpt := b.finalizedCheckpointVal()
-	// The epoch field of a finalized checkpoint is the neighbor
-	// index of the finalized root field in its Merkle tree representation
-	// of the checkpoint. This neighbor is the first element added to the proof.
-	epochBuf := make([]byte, 8)
-	binary.LittleEndian.PutUint64(epochBuf, uint64(cpt.Epoch))
-	epochRoot := bytesutil.ToBytes32(epochBuf)
-	proof := make([][]byte, 0)
-	proof = append(proof, epochRoot[:])
-	branch := trie.ProofFromMerkleLayers(b.merkleLayers, types.FinalizedCheckpoint.RealPosition())
-	proof = append(proof, branch...)
-	return proof, nil
+	return trie.ProofFromMerkleLayers(b.merkleLayers, f.RealPosition()), nil
 }

+func (b *BeaconState) validateFieldIndex(f types.FieldIndex) error {
+	switch b.version {
+	case version.Phase0:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Altair:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateAltairFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Bellatrix:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateBellatrixFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Capella:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateCapellaFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Deneb:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateDenebFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Electra:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateElectraFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	case version.Fulu:
+		if f.RealPosition() > params.BeaconConfig().BeaconStateFuluFieldCount-1 {
+			return errNotSupported(f.String(), b.version)
+		}
+	}
+
+	return nil
+}
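The comment in FinalizedRootProof relies on how SSZ merkleizes a two-field container: the checkpoint root is the hash of the epoch leaf concatenated with the root leaf, so the epoch leaf is the sibling ("neighbor") that must be prepended to the branch returned by proofByFieldIndex. A minimal runnable sketch of that hashing step (standard SSZ container hashing, not Prysm code):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// checkpointRoot computes the SSZ hash tree root of a Checkpoint{epoch, root}:
// sha256(epochLeaf || root), where epochLeaf is the epoch encoded little-endian
// and right-padded to 32 bytes. Verifying `root` against this value therefore
// needs only the epoch leaf plus the state-level branch.
func checkpointRoot(epoch uint64, root [32]byte) [32]byte {
	var epochLeaf [32]byte
	binary.LittleEndian.PutUint64(epochLeaf[:8], epoch)
	return sha256.Sum256(append(epochLeaf[:], root[:]...))
}

func main() {
	var finalizedRoot [32]byte
	finalizedRoot[0] = 0xaa
	fmt.Printf("%x\n", checkpointRoot(42, finalizedRoot))
}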
@@ -21,10 +21,6 @@ func TestBeaconStateMerkleProofs_phase0_notsupported(t *testing.T) {
 		_, err := st.NextSyncCommitteeProof(ctx)
 		require.ErrorContains(t, "not supported", err)
 	})
-	t.Run("finalized root", func(t *testing.T) {
-		_, err := st.FinalizedRootProof(ctx)
-		require.ErrorContains(t, "not supported", err)
-	})
 }
 func TestBeaconStateMerkleProofs_altair(t *testing.T) {
 	ctx := t.Context()
@@ -1,3 +0,0 @@
### Changed

- post electra we now call attestation data once per slot and use a cache for subsequent requests

changelog/potuz_preprocess_block_before_processing.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add feature flag `--enable-proposer-preprocessing` to process the block and verify signatures before proposing.

changelog/potuz_update_spectests_2.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Update spectests to v1.7.0-alpha-1.

changelog/syjn99_generalize-proof-generation.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add `ProofByFieldIndex` to generalize merkle proof generation for `BeaconState`.
@@ -71,6 +71,7 @@ type Flags struct {
 	IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).

 	EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
+	EnableProposerPreprocessing  bool // EnableProposerPreprocessing enables proposer pre-processing of blocks before proposing.

 	PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
 	// BlobSaveFsync requires blob saving to block on fsync to ensure blobs are durably persisted before passing DA.

@@ -80,7 +81,6 @@ type Flags struct {
 	SaveInvalidBlob bool // SaveInvalidBlob saves invalid blob to temp.

 	EnableDiscoveryReboot bool // EnableDiscoveryReboot allows the node to have its local listener to be rebooted in the event of discovery issues.
-	LowValcountSweep      bool // LowValcountSweep bounds withdrawal sweep by validator count.

 	// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
 	// changed on disk. This feature is for advanced use cases only.
@@ -242,6 +242,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
 		logEnabled(disableVerboseSigVerification)
 		cfg.EnableVerboseSigVerification = false
 	}
+	cfg.EnableProposerPreprocessing = ctx.Bool(enableProposerPreprocessing.Name)
+	if cfg.EnableProposerPreprocessing {
+		logEnabled(enableProposerPreprocessing)
+	}
 	if ctx.IsSet(prepareAllPayloads.Name) {
 		logEnabled(prepareAllPayloads)
 		cfg.PrepareAllPayloads = true

@@ -285,11 +289,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
 		logEnabled(ignoreUnviableAttestations)
 		cfg.IgnoreUnviableAttestations = true
 	}
-	if ctx.Bool(lowValcountSweep.Name) {
-		logEnabled(lowValcountSweep)
-		cfg.LowValcountSweep = true
-	}
-
 	if ctx.IsSet(EnableStateDiff.Name) {
 		logEnabled(EnableStateDiff)
 		cfg.EnableStateDiff = true
@@ -137,6 +137,11 @@ var (
Name: "disable-verbose-sig-verification",
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
}
enableProposerPreprocessing = &cli.BoolFlag{
Name: "enable-proposer-preprocessing",
Usage: "Enables proposer pre-processing of blocks before proposing.",
Value: false,
}
prepareAllPayloads = &cli.BoolFlag{
Name: "prepare-all-payloads",
Usage: "Informs the engine to prepare all local payloads. Useful for relayers and builders.",
@@ -211,13 +216,6 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// lowValcountSweep bounds withdrawal sweep by validator count.
lowValcountSweep = &cli.BoolFlag{
Name: "low-valcount-sweep",
Usage: "Uses validator count bound for withdrawal sweep when validator count is less than MaxValidatorsPerWithdrawalsSweep.",
Value: false,
Hidden: true,
}
)

// devModeFlags holds list of flags that are set when development mode is on.
@@ -266,6 +264,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
ignoreUnviableAttestations,
enableFullSSZDataLogging,
disableVerboseSigVerification,
enableProposerPreprocessing,
prepareAllPayloads,
aggregateFirstInterval,
aggregateSecondInterval,
@@ -279,7 +278,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
lowValcountSweep,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.0
version: v1.7.0-alpha.1
style: full

specrefs:
@@ -117,6 +117,8 @@ exceptions:
dataclasses:
# Not implemented
- BlobParameters#fulu
- ExpectedWithdrawals#capella
- ExpectedWithdrawals#electra
- LatestMessage#phase0
- LightClientStore#altair
- OptimisticStore#bellatrix
@@ -126,6 +128,7 @@ exceptions:
- LightClientStore#capella

# Not implemented: gloas (future fork)
- ExpectedWithdrawals#gloas
- LatestMessage#gloas
- Store#gloas

@@ -34,6 +34,26 @@
blobs: List[Blob, MAX_BLOB_COMMITMENTS_PER_BLOCK]
</spec>

- name: ExpectedWithdrawals#capella
sources: []
spec: |
<spec dataclass="ExpectedWithdrawals" fork="capella" hash="73becb4c">
class ExpectedWithdrawals(object):
withdrawals: Sequence[Withdrawal]
processed_sweep_withdrawals_count: uint64
</spec>

- name: ExpectedWithdrawals#electra
sources: []
spec: |
<spec dataclass="ExpectedWithdrawals" fork="electra" hash="a3827f01">
class ExpectedWithdrawals(object):
withdrawals: Sequence[Withdrawal]
# [New in Electra:EIP7251]
processed_partial_withdrawals_count: uint64
processed_sweep_withdrawals_count: uint64
</spec>

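For readers mapping the new spec container onto Go, a rough equivalent of the Electra-era dataclass above might look like the sketch below; the types and field names are illustrative only and are not Prysm's actual state API.

package sketch

// Withdrawal is a stand-in for the real consensus type (index, validator index,
// 20-byte execution address, amount in Gwei).
type Withdrawal struct {
	Index          uint64
	ValidatorIndex uint64
	Address        [20]byte
	Amount         uint64
}

// ExpectedWithdrawals mirrors the spec dataclass shown above.
type ExpectedWithdrawals struct {
	Withdrawals                      []*Withdrawal
	ProcessedPartialWithdrawalsCount uint64 // [New in Electra:EIP7251]
	ProcessedSweepWithdrawalsCount   uint64
}

Returning a single container instead of a positional tuple is what lets the spec call sites below read get_expected_withdrawals(state).withdrawals.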
- name: GetPayloadResponse#bellatrix
sources:
- file: consensus-types/blocks/get_payload.go

@@ -2366,8 +2366,8 @@
- file: beacon-chain/state/state-native/getters_withdrawal.go
search: func (b *BeaconState) ExpectedWithdrawals(
spec: |
<spec fn="get_expected_withdrawals" fork="capella" hash="d6a98c14">
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
<spec fn="get_expected_withdrawals" fork="capella" hash="12088347">
def get_expected_withdrawals(state: BeaconState) -> ExpectedWithdrawals:
withdrawal_index = state.next_withdrawal_index
withdrawals: List[Withdrawal] = []

@@ -2377,7 +2377,10 @@
)
withdrawals.extend(validators_sweep_withdrawals)

return withdrawals, processed_validators_sweep_count
return ExpectedWithdrawals(
withdrawals,
processed_validators_sweep_count,
)
</spec>

- name: get_expected_withdrawals#electra
@@ -2385,8 +2388,8 @@
- file: beacon-chain/state/state-native/getters_withdrawal.go
search: func (b *BeaconState) ExpectedWithdrawals(
spec: |
<spec fn="get_expected_withdrawals" fork="electra" hash="cfce862b">
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64, uint64]:
<spec fn="get_expected_withdrawals" fork="electra" hash="254541e3">
def get_expected_withdrawals(state: BeaconState) -> ExpectedWithdrawals:
withdrawal_index = state.next_withdrawal_index
withdrawals: List[Withdrawal] = []

@@ -2403,8 +2406,12 @@
)
withdrawals.extend(validators_sweep_withdrawals)

# [Modified in Electra:EIP7251]
return withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count
return ExpectedWithdrawals(
withdrawals,
# [New in Electra:EIP7251]
processed_partial_withdrawals_count,
processed_validators_sweep_count,
)
</spec>

- name: get_filtered_block_tree
@@ -4932,7 +4939,7 @@
- name: prepare_execution_payload#capella
sources: []
spec: |
<spec fn="prepare_execution_payload" fork="capella" hash="c258893e">
<spec fn="prepare_execution_payload" fork="capella" hash="998e8b92">
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
@@ -4945,15 +4952,12 @@
parent_hash = state.latest_execution_payload_header.block_hash

# Set the forkchoice head and initiate the payload build process
# [New in Capella]
withdrawals, _ = get_expected_withdrawals(state)

payload_attributes = PayloadAttributes(
timestamp=compute_time_at_slot(state, state.slot),
prev_randao=get_randao_mix(state, get_current_epoch(state)),
suggested_fee_recipient=suggested_fee_recipient,
# [New in Capella]
withdrawals=withdrawals,
withdrawals=get_expected_withdrawals(state).withdrawals,
)
return execution_engine.notify_forkchoice_updated(
head_block_hash=parent_hash,
@@ -4966,7 +4970,7 @@
- name: prepare_execution_payload#deneb
sources: []
spec: |
<spec fn="prepare_execution_payload" fork="deneb" hash="59f61f3a">
<spec fn="prepare_execution_payload" fork="deneb" hash="c617aa1b">
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
@@ -4978,13 +4982,11 @@
parent_hash = state.latest_execution_payload_header.block_hash

# Set the forkchoice head and initiate the payload build process
withdrawals, _ = get_expected_withdrawals(state)

payload_attributes = PayloadAttributes(
timestamp=compute_time_at_slot(state, state.slot),
prev_randao=get_randao_mix(state, get_current_epoch(state)),
suggested_fee_recipient=suggested_fee_recipient,
withdrawals=withdrawals,
withdrawals=get_expected_withdrawals(state).withdrawals,
# [New in Deneb:EIP4788]
parent_beacon_block_root=hash_tree_root(state.latest_block_header),
)
@@ -4999,7 +5001,7 @@
- name: prepare_execution_payload#electra
sources: []
spec: |
<spec fn="prepare_execution_payload" fork="electra" hash="5414b883">
<spec fn="prepare_execution_payload" fork="electra" hash="c617aa1b">
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
@@ -5010,15 +5012,13 @@
# Verify consistency of the parent hash with respect to the previous execution payload header
parent_hash = state.latest_execution_payload_header.block_hash

# [Modified in EIP7251]
# Set the forkchoice head and initiate the payload build process
withdrawals, _, _ = get_expected_withdrawals(state)

payload_attributes = PayloadAttributes(
timestamp=compute_time_at_slot(state, state.slot),
prev_randao=get_randao_mix(state, get_current_epoch(state)),
suggested_fee_recipient=suggested_fee_recipient,
withdrawals=withdrawals,
withdrawals=get_expected_withdrawals(state).withdrawals,
# [New in Deneb:EIP4788]
parent_beacon_block_root=hash_tree_root(state.latest_block_header),
)
return execution_engine.notify_forkchoice_updated(
@@ -6272,8 +6272,8 @@

- name: process_operations#electra
sources:
- file: beacon-chain/core/electra/transition_no_verify_sig.go
search: func ProcessOperations(
- file: beacon-chain/core/transition/electra.go
search: func electraOperations(
spec: |
<spec fn="process_operations" fork="electra" hash="643f18b4">
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
@@ -7071,18 +7071,18 @@
- file: beacon-chain/core/blocks/withdrawals.go
search: func ProcessWithdrawals(
spec: |
<spec fn="process_withdrawals" fork="capella" hash="901f9fc4">
<spec fn="process_withdrawals" fork="capella" hash="47327ef6">
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
# Get expected withdrawals
withdrawals, processed_validators_sweep_count = get_expected_withdrawals(state)
assert payload.withdrawals == withdrawals
expected = get_expected_withdrawals(state)
assert payload.withdrawals == expected.withdrawals

# Apply expected withdrawals
apply_withdrawals(state, withdrawals)
apply_withdrawals(state, expected.withdrawals)

# Update withdrawals fields in the state
update_next_withdrawal_index(state, withdrawals)
update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
update_next_withdrawal_index(state, expected.withdrawals)
update_next_withdrawal_validator_index(state, expected.withdrawals)
</spec>

- name: process_withdrawals#electra
@@ -7090,23 +7090,20 @@
- file: beacon-chain/core/blocks/withdrawals.go
search: func ProcessWithdrawals(
spec: |
<spec fn="process_withdrawals" fork="electra" hash="67870972">
<spec fn="process_withdrawals" fork="electra" hash="24a50daa">
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
# [Modified in Electra:EIP7251]
# Get expected withdrawals
withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count = (
get_expected_withdrawals(state)
)
assert payload.withdrawals == withdrawals
expected = get_expected_withdrawals(state)
assert payload.withdrawals == expected.withdrawals

# Apply expected withdrawals
apply_withdrawals(state, withdrawals)
apply_withdrawals(state, expected.withdrawals)

# Update withdrawals fields in the state
update_next_withdrawal_index(state, withdrawals)
update_next_withdrawal_index(state, expected.withdrawals)
# [New in Electra:EIP7251]
update_pending_partial_withdrawals(state, processed_partial_withdrawals_count)
update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
update_next_withdrawal_validator_index(state, expected.withdrawals)
</spec>

- name: queue_excess_active_balance

@@ -10,7 +10,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/utils",
visibility = ["//testing/spectest:__subpackages__"],
deps = [
"//config/features:go_default_library",
"//config/params:go_default_library",
"//io/file:go_default_library",
"//testing/require:go_default_library",

@@ -6,20 +6,12 @@ import (
"fmt"
"testing"

"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
)

// SetConfig sets the global params for spec tests depending on the option chosen.
// Provides reset function allowing to get back to the previous configuration at the end of a test.
func SetConfig(t testing.TB, config string) error {
params.SetupTestConfigCleanup(t)

resetFeatures := features.InitWithReset(&features.Flags{
LowValcountSweep: true,
})
t.Cleanup(resetFeatures)

switch config {
case "minimal":
params.OverrideBeaconConfig(params.MinimalSpecConfig().Copy())

@@ -424,7 +424,8 @@ func debugStateTransition(
return st, errors.Wrap(err, "could not process block")
}
var valid bool
valid, err = set.VerifyVerbosely()
sigSet := set.Batch()
valid, err = sigSet.VerifyVerbosely()
if err != nil {
return st, errors.Wrap(err, "could not batch verify signature")
}

@@ -71,9 +71,17 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
return
}

committeeIndex := duty.CommitteeIndex
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
if postElectra {
committeeIndex = 0
}

data, err := v.getAttestationData(ctx, slot, duty.CommitteeIndex)
req := &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
}
data, err := v.validatorClient.AttestationData(ctx, req)
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {

@@ -112,9 +112,6 @@ type validator struct {
blacklistedPubkeysLock sync.RWMutex
attSelectionLock sync.Mutex
dutiesLock sync.RWMutex
attestationDataCacheLock sync.RWMutex
attestationDataCache *ethpb.AttestationData
attestationDataCacheSlot primitives.Slot
disableDutiesPolling bool
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
@@ -980,55 +977,6 @@ func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, doma
return res, nil
}

// getAttestationData fetches attestation data from the beacon node with caching for post-Electra.
// Post-Electra, attestation data is identical for all validators in the same slot (committee index is always 0),
// so we cache it to avoid redundant beacon node requests.
func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) (*ethpb.AttestationData, error) {
ctx, span := trace.StartSpan(ctx, "validator.getAttestationData")
defer span.End()

postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch

// Pre-Electra: no caching since committee index varies per validator
if !postElectra {
return v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}

// Post-Electra: check cache first (committee index is always 0)
v.attestationDataCacheLock.RLock()
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
data := v.attestationDataCache
v.attestationDataCacheLock.RUnlock()
return data, nil
}
v.attestationDataCacheLock.RUnlock()

// Cache miss - acquire write lock and fetch
v.attestationDataCacheLock.Lock()
defer v.attestationDataCacheLock.Unlock()

// Double-check after acquiring write lock (another goroutine may have filled the cache)
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
return v.attestationDataCache, nil
}

data, err := v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: 0,
})
if err != nil {
return nil, err
}

v.attestationDataCache = data
v.attestationDataCacheSlot = slot

return data, nil
}

func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
for i := range attesterKeys {

@@ -2977,207 +2977,3 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
require.NoError(t, v.checkDependentRoots(ctx, head))
})
}

func TestGetAttestationData_PreElectraNoCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}

// Pre-Electra slot (Electra fork epoch is far in the future by default)
preElectraSlot := primitives.Slot(10)

expectedData := &ethpb.AttestationData{
Slot: preElectraSlot,
CommitteeIndex: 5,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}

// Each call should go to the beacon node (no caching pre-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 5,
}).Return(expectedData, nil)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 7,
}).Return(expectedData, nil)

// First call with committee index 5
data1, err := v.getAttestationData(context.Background(), preElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)

// Second call with different committee index 7 - should still call beacon node
data2, err := v.getAttestationData(context.Background(), preElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
}

func TestGetAttestationData_PostElectraCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()

client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}

// Post-Electra slot
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)

expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}

// Only ONE call should go to the beacon node (caching post-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)

// First call - should hit beacon node
data1, err := v.getAttestationData(context.Background(), postElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)

// Second call with different committee index - should use cache
data2, err := v.getAttestationData(context.Background(), postElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)

// Third call - should still use cache
data3, err := v.getAttestationData(context.Background(), postElectraSlot, 10)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data3)
}

func TestGetAttestationData_PostElectraCacheInvalidatesOnNewSlot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()

client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}

slot1 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
slot2 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 6)

dataSlot1 := &ethpb.AttestationData{
Slot: slot1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
dataSlot2 := &ethpb.AttestationData{
Slot: slot2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}

// Expect one call per slot
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot1,
CommitteeIndex: 0,
}).Return(dataSlot1, nil).Times(1)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot2,
CommitteeIndex: 0,
}).Return(dataSlot2, nil).Times(1)

// First slot - should hit beacon node
data1, err := v.getAttestationData(context.Background(), slot1, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1)

// Same slot - should use cache
data1Again, err := v.getAttestationData(context.Background(), slot1, 7)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1Again)

// New slot - should invalidate cache and hit beacon node
data2, err := v.getAttestationData(context.Background(), slot2, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot2, data2)
}

func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()

client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}

postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)

expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}

// Should only call beacon node once despite concurrent requests
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)

var wg sync.WaitGroup
numGoroutines := 10
results := make([]*ethpb.AttestationData, numGoroutines)
errs := make([]error, numGoroutines)

for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(idx int) {
defer wg.Done()
results[idx], errs[idx] = v.getAttestationData(context.Background(), postElectraSlot, primitives.CommitteeIndex(idx))
}(i)
}

wg.Wait()

for i := 0; i < numGoroutines; i++ {
require.NoError(t, errs[i])
require.DeepEqual(t, expectedData, results[i])
}
}