Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)
Compare commits
change-slo ... deneb-devn (18 commits)
Commits:

- b192569852
- c3eff4bebd
- f1dd96b1ad
- f687a72ced
- ab9623ee6d
- 3ed7fa6a17
- 06ff0e9c08
- 0521a8464f
- c807214b65
- 92d9690c71
- 32184933d2
- cf9ac0b615
- ad2f47b2d9
- 7daa24a540
- 0576053d0c
- 6415a68ca4
- f71fdcfa42
- 3f2b90343d
WORKSPACE (10 changed lines)
@@ -205,7 +205,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.4.0-alpha.1"
consensus_spec_version = "v1.4.0-alpha.2"

bls_test_version = "v0.1.1"

@@ -221,7 +221,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
    sha256 = "bfba887cbe043907adf884cf6d18f2e8a31e34e9245397b84af1f54ed22b706a",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -237,7 +237,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
    sha256 = "9ff77bef0ca1e39bcee2769075c89f0f91fb8f89ad38a1b3e0c31cf6732650ad",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -253,7 +253,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
    sha256 = "fbcc3c9898110c675e5de9c27cb667ad7cadf930db7ebb5c6bba15d7be95bf8a",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -268,7 +268,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
    sha256 = "9fff1bcdd0e5857797197800db091c3675b2c11b54f704fe4de1ba683bed7ba5",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -207,7 +207,18 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
	if err != nil {
		return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
	}
	lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)

	var lastValidHash []byte
	if blk.Version() >= version.Deneb {
		_, err = blk.Block().Body().BlobKzgCommitments()
		if err != nil {
			return false, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
		}
		// TODO: Convert kzg commitment to version hashes and feed to below
		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{})
	} else {
		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{} /*empty version hashes before Deneb*/)
	}
	switch err {
	case nil:
		newPayloadValidNodeCount.Inc()
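The TODO in the Deneb branch above still has to turn the blob KZG commitments into versioned hashes before handing them to NewPayload. Under EIP-4844 a versioned hash is the SHA-256 of the 48-byte commitment with its first byte replaced by the KZG version byte 0x01. A minimal standalone sketch of that conversion (the helper name and wiring are illustrative, not code from this diff):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const versionedHashVersionKZG = byte(0x01) // EIP-4844 VERSIONED_HASH_VERSION_KZG

// kzgCommitmentToVersionedHash hashes a 48-byte KZG commitment and stamps the
// version byte, producing the 32-byte versioned hash the engine API expects.
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = versionedHashVersionKZG
	return h
}

func main() {
	commitment := make([]byte, 48) // placeholder commitment bytes
	fmt.Printf("%x\n", kzgCommitmentToVersionedHash(commitment))
}
```

A slice of such 32-byte hashes, one per commitment, is what would eventually replace the empty [][32]byte{} placeholder passed to NewPayload above.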
@@ -305,7 +316,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

	var attr payloadattribute.Attributer
	switch st.Version() {
	case version.Capella:
	case version.Capella, version.Deneb:
		withdrawals, err := st.ExpectedWithdrawals()
		if err != nil {
			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
@@ -827,6 +827,42 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
	require.Equal(t, 0, len(a))
}

func Test_GetPayloadAttributeDeneb(t *testing.T) {
	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
	ctx := tr.ctx

	st, _ := util.DeterministicGenesisStateDeneb(t, 1)
	hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
	require.Equal(t, false, hasPayload)
	require.Equal(t, primitives.ValidatorIndex(0), vId)

	// Cache hit, advance state, no fee recipient
	suggestedVid := primitives.ValidatorIndex(1)
	slot := primitives.Slot(1)
	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
	hook := logTest.NewGlobal()
	hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	require.Equal(t, true, hasPayload)
	require.Equal(t, suggestedVid, vId)
	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
	a, err := attr.Withdrawals()
	require.NoError(t, err)
	require.Equal(t, 0, len(a))

	// Cache hit, advance state, has fee recipient
	suggestedAddr := common.HexToAddress("123")
	require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
	hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	require.Equal(t, true, hasPayload)
	require.Equal(t, suggestedVid, vId)
	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
	a, err = attr.Withdrawals()
	require.NoError(t, err)
	require.Equal(t, 0, len(a))
}

func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MainnetConfig())
@@ -26,6 +26,7 @@ type BlockReceiver interface {
	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
	ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
	HasBlock(ctx context.Context, root [32]byte) bool
	RecentBlockSlot(root [32]byte) (primitives.Slot, error)
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.

@@ -150,6 +151,11 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
	return s.hasBlockInInitSyncOrDB(ctx, root)
}

// RecentBlockSlot returns the block slot from the fork choice store.
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
	return s.cfg.ForkChoiceStore.Slot(root)
}

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
	s.cfg.ForkChoiceStore.Lock()
@@ -52,6 +52,11 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
	return nil
}

func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
	mb.broadcastCalled = true
	return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
@@ -69,6 +69,7 @@ type ChainService struct {
	OptimisticCheckRootReceived [32]byte
	FinalizedRoots              map[[32]byte]bool
	OptimisticRoots             map[[32]byte]bool
	BlockSlot                   primitives.Slot
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {

@@ -389,6 +390,11 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
	return s.InitSyncBlockRoots[rt]
}

// RecentBlockSlot mocks the same method in the chain service.
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
	return s.BlockSlot, nil
}

// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
	return [32]byte{}
@@ -145,6 +145,40 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
			},
			Signature: params.BeaconConfig().EmptySignature[:],
		})
	case *ethpb.BeaconStateDeneb:
		return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
			Block: &ethpb.BeaconBlockDeneb{
				ParentRoot: params.BeaconConfig().ZeroHash[:],
				StateRoot:  root[:],
				Body: &ethpb.BeaconBlockBodyDeneb{
					RandaoReveal: make([]byte, 96),
					Eth1Data: &ethpb.Eth1Data{
						DepositRoot: make([]byte, 32),
						BlockHash:   make([]byte, 32),
					},
					Graffiti: make([]byte, 32),
					SyncAggregate: &ethpb.SyncAggregate{
						SyncCommitteeBits:      make([]byte, fieldparams.SyncCommitteeLength/8),
						SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
					},
					ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
						ParentHash:    make([]byte, 32),
						FeeRecipient:  make([]byte, 20),
						StateRoot:     make([]byte, 32),
						ReceiptsRoot:  make([]byte, 32),
						LogsBloom:     make([]byte, 256),
						PrevRandao:    make([]byte, 32),
						BaseFeePerGas: make([]byte, 32),
						BlockHash:     make([]byte, 32),
						Transactions:  make([][]byte, 0),
						Withdrawals:   make([]*enginev1.Withdrawal, 0),
					},
					BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
					BlobKzgCommitments:    make([][]byte, 0),
				},
			},
			Signature: params.BeaconConfig().EmptySignature[:],
		})
	default:
		return nil, ErrUnrecognizedState
	}
beacon-chain/core/deneb/BUILD.bazel (new file, 30 lines)
@@ -0,0 +1,30 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["upgrade.go"],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["upgrade_test.go"],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//config/params:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
    ],
)
beacon-chain/core/deneb/upgrade.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package deneb

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// UpgradeToDeneb upgrades a generic input state to a Deneb-versioned state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
	epoch := time.CurrentEpoch(state)

	currentSyncCommittee, err := state.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSyncCommittee, err := state.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	prevEpochParticipation, err := state.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	currentEpochParticipation, err := state.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	inactivityScores, err := state.InactivityScores()
	if err != nil {
		return nil, err
	}
	payloadHeader, err := state.LatestExecutionPayloadHeader()
	if err != nil {
		return nil, err
	}
	txRoot, err := payloadHeader.TransactionsRoot()
	if err != nil {
		return nil, err
	}
	wdRoot, err := payloadHeader.WithdrawalsRoot()
	if err != nil {
		return nil, err
	}
	wi, err := state.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	vi, err := state.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	summaries, err := state.HistoricalSummaries()
	if err != nil {
		return nil, err
	}

	s := &ethpb.BeaconStateDeneb{
		GenesisTime:           state.GenesisTime(),
		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
		Slot:                  state.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: state.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().DenebForkVersion,
			Epoch:           epoch,
		},
		LatestBlockHeader:           state.LatestBlockHeader(),
		BlockRoots:                  state.BlockRoots(),
		StateRoots:                  state.StateRoots(),
		HistoricalRoots:             [][]byte{},
		Eth1Data:                    state.Eth1Data(),
		Eth1DataVotes:               state.Eth1DataVotes(),
		Eth1DepositIndex:            state.Eth1DepositIndex(),
		Validators:                  state.Validators(),
		Balances:                    state.Balances(),
		RandaoMixes:                 state.RandaoMixes(),
		Slashings:                   state.Slashings(),
		PreviousEpochParticipation:  prevEpochParticipation,
		CurrentEpochParticipation:   currentEpochParticipation,
		JustificationBits:           state.JustificationBits(),
		PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  state.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         state.FinalizedCheckpoint(),
		InactivityScores:            inactivityScores,
		CurrentSyncCommittee:        currentSyncCommittee,
		NextSyncCommittee:           nextSyncCommittee,
		LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash:       payloadHeader.ParentHash(),
			FeeRecipient:     payloadHeader.FeeRecipient(),
			StateRoot:        payloadHeader.StateRoot(),
			ReceiptsRoot:     payloadHeader.ReceiptsRoot(),
			LogsBloom:        payloadHeader.LogsBloom(),
			PrevRandao:       payloadHeader.PrevRandao(),
			BlockNumber:      payloadHeader.BlockNumber(),
			GasLimit:         payloadHeader.GasLimit(),
			GasUsed:          payloadHeader.GasUsed(),
			Timestamp:        payloadHeader.Timestamp(),
			ExtraData:        payloadHeader.ExtraData(),
			BaseFeePerGas:    payloadHeader.BaseFeePerGas(),
			BlockHash:        payloadHeader.BlockHash(),
			ExcessDataGas:    0,
			DataGasUsed:      0,
			TransactionsRoot: txRoot,
			WithdrawalsRoot:  wdRoot,
		},
		NextWithdrawalIndex:          wi,
		NextWithdrawalValidatorIndex: vi,
		HistoricalSummaries:          summaries,
	}

	return state_native.InitializeFromProtoUnsafeDeneb(s)
}
beacon-chain/core/deneb/upgrade_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package deneb_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestUpgradeToDeneb(t *testing.T) {
	st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	preForkState := st.Copy()
	mSt, err := deneb.UpgradeToDeneb(st)
	require.NoError(t, err)

	require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
	require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
	require.Equal(t, preForkState.Slot(), mSt.Slot())
	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
	require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
	require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
	require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
	require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
	require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
	require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
	require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
	require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
	require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
	require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
	require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
	require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
	numValidators := mSt.NumValidators()
	p, err := mSt.PreviousEpochParticipation()
	require.NoError(t, err)
	require.DeepSSZEqual(t, make([]byte, numValidators), p)
	p, err = mSt.CurrentEpochParticipation()
	require.NoError(t, err)
	require.DeepSSZEqual(t, make([]byte, numValidators), p)
	s, err := mSt.InactivityScores()
	require.NoError(t, err)
	require.DeepSSZEqual(t, make([]uint64, numValidators), s)

	f := mSt.Fork()
	require.DeepSSZEqual(t, &ethpb.Fork{
		PreviousVersion: st.Fork().CurrentVersion,
		CurrentVersion:  params.BeaconConfig().DenebForkVersion,
		Epoch:           time.CurrentEpoch(st),
	}, f)
	csc, err := mSt.CurrentSyncCommittee()
	require.NoError(t, err)
	psc, err := preForkState.CurrentSyncCommittee()
	require.NoError(t, err)
	require.DeepSSZEqual(t, psc, csc)
	nsc, err := mSt.NextSyncCommittee()
	require.NoError(t, err)
	psc, err = preForkState.NextSyncCommittee()
	require.NoError(t, err)
	require.DeepSSZEqual(t, psc, nsc)

	header, err := mSt.LatestExecutionPayloadHeader()
	require.NoError(t, err)
	protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
	require.Equal(t, true, ok)
	prevHeader, err := preForkState.LatestExecutionPayloadHeader()
	require.NoError(t, err)
	txRoot, err := prevHeader.TransactionsRoot()
	require.NoError(t, err)

	wdRoot, err := prevHeader.WithdrawalsRoot()
	require.NoError(t, err)
	wanted := &enginev1.ExecutionPayloadHeaderDeneb{
		ParentHash:       prevHeader.ParentHash(),
		FeeRecipient:     prevHeader.FeeRecipient(),
		StateRoot:        prevHeader.StateRoot(),
		ReceiptsRoot:     prevHeader.ReceiptsRoot(),
		LogsBloom:        prevHeader.LogsBloom(),
		PrevRandao:       prevHeader.PrevRandao(),
		BlockNumber:      prevHeader.BlockNumber(),
		GasLimit:         prevHeader.GasLimit(),
		GasUsed:          prevHeader.GasUsed(),
		Timestamp:        prevHeader.Timestamp(),
		BaseFeePerGas:    prevHeader.BaseFeePerGas(),
		BlockHash:        prevHeader.BlockHash(),
		TransactionsRoot: txRoot,
		WithdrawalsRoot:  wdRoot,
	}
	require.DeepEqual(t, wanted, protoHeader)
}
@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
	return epochStart && capellaEpoch
}

// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
func CanUpgradeToDeneb(slot primitives.Slot) bool {
	epochStart := slots.IsEpochStart(slot)
	DenebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
	return epochStart && DenebEpoch
}

// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
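As a worked example of the condition above (a standalone sketch, not part of this change): with mainnet's 32 slots per epoch and the DENEB_FORK_EPOCH of 5 that the test below configures, only the first slot of epoch 5 qualifies.

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet preset
const denebForkEpoch = 5 // assumed fork epoch, matching the test configuration below

// canUpgradeToDeneb mirrors the spec condition:
// slot % SLOTS_PER_EPOCH == 0 and slot / SLOTS_PER_EPOCH == DENEB_FORK_EPOCH.
func canUpgradeToDeneb(slot uint64) bool {
	return slot%slotsPerEpoch == 0 && slot/slotsPerEpoch == denebForkEpoch
}

func main() {
	fmt.Println(canUpgradeToDeneb(160)) // true: first slot of epoch 5
	fmt.Println(canUpgradeToDeneb(161)) // false: not an epoch start
	fmt.Println(canUpgradeToDeneb(32))  // false: epoch start, but epoch 1 is not the fork epoch
}
```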
@@ -298,3 +298,38 @@ func TestCanUpgradeToCapella(t *testing.T) {
		})
	}
}

func TestCanUpgradeToDeneb(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	bc := params.BeaconConfig()
	bc.DenebForkEpoch = 5
	params.OverrideBeaconConfig(bc)
	tests := []struct {
		name string
		slot primitives.Slot
		want bool
	}{
		{
			name: "not epoch start",
			slot: 1,
			want: false,
		},
		{
			name: "not deneb epoch",
			slot: params.BeaconConfig().SlotsPerEpoch,
			want: false,
		},
		{
			name: "deneb epoch",
			slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
				t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
			}
		})
	}
}
@@ -18,6 +18,7 @@ go_library(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/capella:go_default_library",
        "//beacon-chain/core/deneb:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/execution:go_default_library",
@@ -12,6 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/capella"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
	e "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/execution"
@@ -269,28 +270,10 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
		return nil, errors.Wrap(err, "failed to increment state slot")
	}

	if time.CanUpgradeToAltair(state.Slot()) {
		state, err = altair.UpgradeToAltair(ctx, state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}

	if time.CanUpgradeToBellatrix(state.Slot()) {
		state, err = execution.UpgradeToBellatrix(state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}

	if time.CanUpgradeToCapella(state.Slot()) {
		state, err = capella.UpgradeToCapella(state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	state, err = UpgradeState(ctx, state)
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, errors.Wrap(err, "failed to upgrade state")
	}
	}
@@ -301,6 +284,45 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
	return state, nil
}

// UpgradeState upgrades the state to the next version if possible.
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
	defer span.End()
	var err error
	if time.CanUpgradeToAltair(state.Slot()) {
		state, err = altair.UpgradeToAltair(ctx, state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}

	if time.CanUpgradeToBellatrix(state.Slot()) {
		state, err = execution.UpgradeToBellatrix(state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}

	if time.CanUpgradeToCapella(state.Slot()) {
		state, err = capella.UpgradeToCapella(state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}

	if time.CanUpgradeToDeneb(state.Slot()) {
		state, err = deneb.UpgradeToDeneb(state)
		if err != nil {
			tracing.AnnotateError(span, err)
			return nil, err
		}
	}
	return state, nil
}

// VerifyOperationLengths verifies that block operation lengths are valid.
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	if err := blocks.BeaconBlockIsNil(b); err != nil {
@@ -256,7 +256,7 @@ func ProcessOperationsNoVerifyAttsSigs(
		if err != nil {
			return nil, err
		}
	case version.Altair, version.Bellatrix, version.Capella:
	case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
		state, err = altairOperations(ctx, state, signedBeaconBlock)
		if err != nil {
			return nil, err
@@ -629,6 +629,20 @@ func TestProcessSlots_ThroughBellatrixEpoch(t *testing.T) {
	require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
	transition.SkipSlotCache.Disable()
	params.SetupTestConfigCleanup(t)
	conf := params.BeaconConfig()
	conf.DenebForkEpoch = 5
	params.OverrideBeaconConfig(conf)

	st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
	require.NoError(t, err)
	require.Equal(t, version.Deneb, st.Version())
	require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 1)
	r := []byte{'a'}
@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
	// Fee recipients operations.
	FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
	RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)

	// Blob operations.
	BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
	BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)

	// origin checkpoint sync support
	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
	BackfillBlockRoot(ctx context.Context) ([32]byte, error)

@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
	SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
	SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error

	// Blob operations.
	SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
	DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error

	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}
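A hedged sketch of how a caller is expected to use the two new interface surfaces together; the local interface and sidecar struct below are stand-ins for the ethpb types, introduced only to keep the example self-contained:

```go
package blobstoreexample

import "context"

// BlobSidecar stands in for ethpb.BlobSidecar in this sketch.
type BlobSidecar struct {
	BlockRoot []byte
	Index     uint64
	Slot      uint64
}

// blobStore captures just the blob methods added to the database interfaces.
type blobStore interface {
	SaveBlobSidecar(ctx context.Context, sidecars []*BlobSidecar) error
	BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*BlobSidecar, error)
	DeleteBlobSidecar(ctx context.Context, root [32]byte) error
}

// storeAndRead shows the intended call pattern: save all sidecars for a block,
// then fetch a subset back by block root and index.
func storeAndRead(ctx context.Context, db blobStore, scs []*BlobSidecar, root [32]byte) error {
	if err := db.SaveBlobSidecar(ctx, scs); err != nil {
		return err
	}
	subset, err := db.BlobSidecarsByRoot(ctx, root, 0, 2) // omit indices to get all sidecars
	if err != nil {
		return err
	}
	_ = subset
	return nil
}
```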
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "archived_point.go",
        "backup.go",
        "blob.go",
        "blocks.go",
        "checkpoint.go",
        "deposit_contract.go",

@@ -38,6 +39,7 @@ go_library(
        "//beacon-chain/state/genesis:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",

@@ -74,6 +76,7 @@ go_test(
    srcs = [
        "archived_point_test.go",
        "backup_test.go",
        "blob_test.go",
        "blocks_test.go",
        "checkpoint_test.go",
        "deposit_contract_test.go",

@@ -110,6 +113,7 @@ go_test(
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/testing:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/assertions:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
beacon-chain/db/kv/blob.go (new file, 253 lines)
@@ -0,0 +1,253 @@
package kv

import (
	"bytes"
	"context"
	"fmt"
	"sort"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
	defer span.End()

	sortSideCars(scs)
	if err := s.verifySideCars(scs); err != nil {
		return err
	}

	slot := scs[0].Slot
	return s.db.Update(func(tx *bolt.Tx) error {
		encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
		if err != nil {
			return err
		}
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		newKey := blobSidecarKey(scs[0])
		rotatingBufferPrefix := newKey[0:8]
		var replacingKey []byte
		for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
			if len(k) != 0 {
				replacingKey = k
				oldSlotBytes := replacingKey[8:16]
				oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
				if oldSlot >= slot {
					return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
				}
				break
			}
		}
		// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
		// store the blob by key and exit early.
		if len(replacingKey) != 0 {
			if err := bkt.Delete(replacingKey); err != nil {
				log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
			}
		}
		return bkt.Put(newKey, encodedBlobSidecar)
	})
}

// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index.
// It also ensures that indices are sequential and start at 0 and no more than MAX_BLOB_EPOCHS.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
	if len(scs) == 0 {
		return errors.New("nil or empty blob sidecars")
	}
	if uint64(len(scs)) > fieldparams.MaxBlobsPerBlock {
		return fmt.Errorf("too many sidecars: %d > %d", len(scs), fieldparams.MaxBlobsPerBlock)
	}

	sl := scs[0].Slot
	pr := scs[0].BlockParentRoot
	r := scs[0].BlockRoot
	p := scs[0].ProposerIndex

	for i, sc := range scs {
		if sc.Slot != sl {
			return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
		}
		if !bytes.Equal(sc.BlockParentRoot, pr) {
			return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
		}
		if !bytes.Equal(sc.BlockRoot, r) {
			return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
		}
		if sc.ProposerIndex != p {
			return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
		}
		if sc.Index != uint64(i) {
			return fmt.Errorf("sidecar index mismatch: %d != %d", sc.Index, i)
		}
	}
	return nil
}

// sortSideCars sorts the sidecars by their index.
func sortSideCars(scs []*ethpb.BlobSidecar) {
	sort.Slice(scs, func(i, j int) bool {
		return scs[i].Index < scs[j].Index
	})
}

// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
	defer span.End()

	var enc []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if bytes.HasSuffix(k, root[:]) {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sc := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sc); err != nil {
		return nil, err
	}

	return filterForIndices(sc, indices...)
}

func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	if len(indices) == 0 {
		return sc.Sidecars, nil
	}
	// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
	// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
	// maps 1:1 with indices in the BlobSidecars storage object.
	maxIdx := uint64(len(sc.Sidecars)) - 1
	sidecars := make([]*ethpb.BlobSidecar, len(indices))
	for i, idx := range indices {
		if idx > maxIdx {
			return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
		}
		sidecars[i] = sc.Sidecars[idx]
	}
	return sidecars, nil
}

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
	defer span.End()

	var enc []byte
	sk := slotKey(slot)
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, _ = c.Next() {
			slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
			if slotInKey == slot {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sc := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sc); err != nil {
		return nil, err
	}

	return filterForIndices(sc, indices...)
}

// DeleteBlobSidecar deletes all blob sidecars stored for the given beacon block root.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if bytes.HasSuffix(k, beaconBlockRoot[:]) {
				if err := bkt.Delete(k); err != nil {
					return err
				}
			}
		}
		return nil
	})
}

// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
	key := slotKey(blob.Slot)
	key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
	key = append(key, blob.BlockRoot...)
	return key
}

func slotKey(slot types.Slot) []byte {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
	return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}

func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
	if err := db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
		k := []byte("epoch-key")
		v := b.Get(k)
		if v == nil {
			if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
				return err
			}
			return nil
		}
		e := bytesutil.BytesToUint64BigEndian(v)
		if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
			return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
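To make the rotating-buffer key scheme in blob.go concrete, here is a standalone sketch that reproduces the documented layout bytes(slot % maxSlots) ++ bytes(slot) ++ block_root. The 4096-epoch retention and 32 slots per epoch are assumptions taken from the mainnet preset and the 4096 value asserted in the test file below, giving the 131072-slot window mentioned in the comments:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	slotsPerEpoch          = 32                                  // mainnet preset
	minEpochsForBlobsReq   = 4096                                // assumed retention, matching the DB check above
	maxSlotsToPersistBlobs = minEpochsForBlobsReq * slotsPerEpoch // 131072
)

// blobKey builds bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot,
// mirroring the layout documented for blobSidecarKey.
func blobKey(slot uint64, blockRoot [32]byte) []byte {
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, slot%maxSlotsToPersistBlobs)
	key = binary.BigEndian.AppendUint64(key, slot)
	return append(key, blockRoot[:]...)
}

func main() {
	a := blobKey(100, [32]byte{'a'})
	b := blobKey(100+maxSlotsToPersistBlobs, [32]byte{'b'})
	// Same first 8 bytes: slot 131172 rotates onto the buffer position of slot 100.
	fmt.Printf("prefix a=%x prefix b=%x\n", a[:8], b[:8])
}
```

Because the 8-byte prefix repeats every 131072 slots, SaveBlobSidecar can locate and delete the older entry occupying the same buffer position with a single prefix seek before writing the new key.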
beacon-chain/db/kv/blob_test.go (new file, 317 lines)
@@ -0,0 +1,317 @@
package kv

import (
	"context"
	"crypto/rand"
	"fmt"
	"testing"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/assertions"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	bolt "go.etcd.io/bbolt"
)

func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
	if len(expect) != len(got) {
		return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
	}
	for i := 0; i < len(expect); i++ {
		es := expect[i]
		gs := got[i]
		var e string
		assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
		if e != "" {
			return errors.New(e)
		}
	}
	return nil
}

func TestStore_BlobSidecars(t *testing.T) {
	ctx := context.Background()

	t.Run("empty", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 0)
		require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
	})
	t.Run("empty by root", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	t.Run("empty by slot", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsBySlot(ctx, 1)
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	t.Run("save and retrieve by root (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve by root (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve valid subset by root", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
		expect[0] = scs[0]
		expect[1] = scs[3]

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(expect, got))
		require.Equal(t, uint64(0), got[0].Index)
		require.Equal(t, uint64(3), got[1].Index)
	})
	t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
		require.Equal(t, 0, len(got))
	})
	t.Run("save and retrieve by slot (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs))
		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve by slot (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
		expect[0] = scs[0]
		expect[1] = scs[3]

		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(expect, got))

		require.Equal(t, uint64(0), got[0].Index)
		require.Equal(t, uint64(3), got[1].Index)
	})
	t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
		require.Equal(t, 0, len(got))
	})
	t.Run("delete works", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
		require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	t.Run("saving a blob with older slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
		require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
	})
	t.Run("saving a new blob for rotation", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		oldBlockRoot := scs[0].BlockRoot
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))

		newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
		for _, sc := range newScs {
			sc.Slot = sc.Slot + newRetentionSlot
		}
		require.NoError(t, db.SaveBlobSidecar(ctx, newScs))

		_, err = db.BlobSidecarsBySlot(ctx, 100)
		require.ErrorIs(t, ErrNotFound, err)

		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(newScs, got))
	})
}

func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
	blobSidecars := make([]*ethpb.BlobSidecar, n)
	for i := uint64(0); i < n; i++ {
		blobSidecars[i] = generateBlobSidecar(t, i)
	}
	return blobSidecars
}

func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
	blob := make([]byte, 131072)
	_, err := rand.Read(blob)
	require.NoError(t, err)
	kzgCommitment := make([]byte, 48)
	_, err = rand.Read(kzgCommitment)
	require.NoError(t, err)
	kzgProof := make([]byte, 48)
	_, err = rand.Read(kzgProof)
	require.NoError(t, err)

	return &ethpb.BlobSidecar{
		BlockRoot:       bytesutil.PadTo([]byte{'a'}, 32),
		Index:           index,
		Slot:            100,
		BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
		ProposerIndex:   101,
		Blob:            blob,
		KzgCommitment:   kzgCommitment,
		KzgProof:        kzgProof,
	}
}

func TestStore_verifySideCars(t *testing.T) {
	s := setupDB(t)
	tests := []struct {
		name  string
		scs   []*ethpb.BlobSidecar
		error string
	}{
		{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
		{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
		{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
		{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
		{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
		{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
		{name: "invalid side index", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 0}}, error: "sidecar index mismatch: 0 != 1"},
		{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := s.verifySideCars(tt.scs)
			if tt.error != "" {
				require.Equal(t, tt.error, err.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestStore_sortSidecars(t *testing.T) {
	scs := []*ethpb.BlobSidecar{
		{Index: 6},
		{Index: 4},
		{Index: 2},
		{Index: 1},
		{Index: 3},
		{Index: 5},
		{},
	}
	sortSideCars(scs)
	for i := 0; i < len(scs)-1; i++ {
		require.Equal(t, uint64(i), scs[i].Index)
	}
}

func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
	s := setupDB(b)
	ctx := context.Background()
	require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
		{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
	}))

	err := s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blobsBucket)
		for i := 1; i < 131071; i++ {
			r := make([]byte, 32)
			_, err := rand.Read(r)
			require.NoError(b, err)
			scs := []*ethpb.BlobSidecar{
				{BlockRoot: r, Slot: primitives.Slot(i)},
			}
			k := blobSidecarKey(scs[0])
			encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
			require.NoError(b, err)
			require.NoError(b, bkt.Put(k, encodedBlobSidecar))
		}
		return nil
	})
	require.NoError(b, err)

	require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
		{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
	}))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
		require.NoError(b, err)
	}
}

func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
	dbStore := setupDB(t)

	require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
	require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check

	nConfig := params.BeaconNetworkConfig()
	nConfig.MinEpochsForBlobsSidecarsRequest = 42069
	params.OverrideBeaconNetworkConfig(nConfig)
	require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
		if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
		}
	case hasDenebKey(enc):
		rawBlock = &ethpb.SignedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal Deneb block")
		}
	case hasDenebBlindKey(enc):
		rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
		}
	default:
		// Marshal block bytes to phase 0 beacon block.
		rawBlock = &ethpb.SignedBeaconBlock{}

@@ -854,6 +864,8 @@ func marshalBlockFull(
		return nil, err
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
	case version.Bellatrix:

@@ -888,6 +900,8 @@ func marshalBlockBlinded(
		return nil, errors.Wrap(err, "could not marshal blinded block")
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
	case version.Bellatrix:
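These hunks extend the database's block codec: each stored block is its SSZ encoding prefixed with a short per-fork key, and the whole value is snappy-compressed; reading checks the prefix to pick the right proto type before unmarshaling. A generic sketch of that round trip (the prefix bytes and helper names are illustrative, not the actual denebKey used by the kv package):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb") // illustrative prefix; the real key bytes live in the kv package

// marshalWithKey prepends the fork key and snappy-compresses key ++ ssz.
func marshalWithKey(key, ssz []byte) []byte {
	return snappy.Encode(nil, append(key, ssz...))
}

// unmarshalWithKey decompresses and, if the fork key matches, returns the SSZ payload.
func unmarshalWithKey(key, enc []byte) ([]byte, error) {
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, err
	}
	if !bytes.HasPrefix(dec, key) {
		return nil, fmt.Errorf("unexpected fork key prefix")
	}
	return dec[len(key):], nil
}

func main() {
	payload := []byte{0x01, 0x02, 0x03} // stand-in for SSZ-encoded block bytes
	enc := marshalWithKey(denebKey, payload)
	got, err := unmarshalWithKey(denebKey, enc)
	fmt.Println(got, err) // [1 2 3] <nil>
}
```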
@@ -90,6 +90,40 @@ var blockTests = []struct {
			return blocks.NewSignedBeaconBlock(b)
		},
	},
	{
		name: "deneb",
		newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
			b := util.NewBeaconBlockDeneb()
			b.Block.Slot = slot
			if root != nil {
				b.Block.ParentRoot = root
				b.Block.Body.BlobKzgCommitments = [][]byte{
					bytesutil.PadTo([]byte{0x01}, 48),
					bytesutil.PadTo([]byte{0x02}, 48),
					bytesutil.PadTo([]byte{0x03}, 48),
					bytesutil.PadTo([]byte{0x04}, 48),
				}
			}
			return blocks.NewSignedBeaconBlock(b)
		},
	},
	{
		name: "deneb blind",
		newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
			b := util.NewBlindedBeaconBlockDeneb()
			b.Block.Slot = slot
			if root != nil {
				b.Block.ParentRoot = root
				b.Block.Body.BlobKzgCommitments = [][]byte{
					bytesutil.PadTo([]byte{0x05}, 48),
					bytesutil.PadTo([]byte{0x06}, 48),
					bytesutil.PadTo([]byte{0x07}, 48),
					bytesutil.PadTo([]byte{0x08}, 48),
				}
			}
			return blocks.NewSignedBeaconBlock(b)
		},
	},
}

func TestStore_SaveBackfillBlockRoot(t *testing.T) {
@@ -359,6 +393,10 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
		wanted, err = blk.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := blk.PbDenebBlock(); err == nil {
		wanted, err = blk.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err := wanted.Proto()
	require.NoError(t, err)
	retrievedPb, err := retrievedBlock.Proto()

@@ -582,6 +620,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
		wanted, err = wanted.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := block1.PbDenebBlock(); err == nil {
		wanted, err = wanted.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err := wanted.Proto()
	require.NoError(t, err)
	bPb, err := b.Proto()

@@ -604,6 +646,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
		wanted2, err = block2.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := block2.PbDenebBlock(); err == nil {
		wanted2, err = block2.ToBlinded()
		require.NoError(t, err)
	}
	wanted2Pb, err := wanted2.Proto()
	require.NoError(t, err)
	bPb, err = b.Proto()

@@ -626,6 +672,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
		wanted, err = wanted.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := block3.PbDenebBlock(); err == nil {
		wanted, err = wanted.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err = wanted.Proto()
	require.NoError(t, err)
	bPb, err = b.Proto()

@@ -666,6 +716,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
		wanted, err = block1.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := block1.PbDenebBlock(); err == nil {
		wanted, err = block1.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err := wanted.Proto()
	require.NoError(t, err)
	bPb, err := b.Proto()

@@ -687,6 +741,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
		wanted, err = genesisBlock.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := genesisBlock.PbDenebBlock(); err == nil {
		wanted, err = genesisBlock.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err = wanted.Proto()
	require.NoError(t, err)
	bPb, err = b.Proto()

@@ -708,6 +766,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
		wanted, err = genesisBlock.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := genesisBlock.PbDenebBlock(); err == nil {
		wanted, err = genesisBlock.ToBlinded()
		require.NoError(t, err)
	}
	wantedPb, err = wanted.Proto()
	require.NoError(t, err)
	bPb, err = b.Proto()

@@ -808,6 +870,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
		wanted, err = b1.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := b1.PbDenebBlock(); err == nil {
		wanted, err = b1.ToBlinded()
		require.NoError(t, err)
	}
	retrieved0Pb, err := retrievedBlocks[0].Proto()
	require.NoError(t, err)
	wantedPb, err := wanted.Proto()

@@ -828,6 +894,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
		wanted, err = b2.ToBlinded()
		require.NoError(t, err)
	}
	if _, err := b2.PbDenebBlock(); err == nil {
		wanted, err = b2.ToBlinded()
		require.NoError(t, err)
	}
	retrieved0Pb, err = retrievedBlocks[0].Proto()
	require.NoError(t, err)
	wantedPb, err = wanted.Proto()
|
||||
@@ -842,6 +912,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b3.PbDenebBlock(); err == nil {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
retrieved1Pb, err := retrievedBlocks[1].Proto()
|
||||
require.NoError(t, err)
|
||||
wantedPb, err = wanted.Proto()
|
||||
|
||||
@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}

func hasDenebKey(enc []byte) bool {
if len(denebKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebKey)], denebKey)
}

func hasDenebBlindKey(enc []byte) bool {
if len(denebBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}

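The new hasDenebKey / hasDenebBlindKey helpers mirror the existing fork-key checks: marshalBlockFull prepends a short fork key before snappy-compressing, and unmarshalBlock strips it again after matching the prefix. A minimal, self-contained sketch of that round trip, with a made-up payload, might look like this (helper names are ours, not Prysm's):

package kvsketch

import (
	"bytes"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb") // fork prefix, as in the schema keys above

// encodeWithKey prefixes an SSZ-encoded block with the fork key and snappy-compresses it.
func encodeWithKey(key, ssz []byte) []byte {
	return snappy.Encode(nil, append(append([]byte{}, key...), ssz...))
}

// hasKey reports whether a decompressed encoding starts with the given fork key.
func hasKey(enc, key []byte) bool {
	if len(key) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(key)], key)
}

// decodeWithKey reverses encodeWithKey, returning the raw SSZ bytes and whether
// the Deneb prefix was present.
func decodeWithKey(enc []byte) ([]byte, bool, error) {
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, false, err
	}
	if !hasKey(dec, denebKey) {
		return dec, false, nil
	}
	return dec[len(denebKey):], true, nil
}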
@@ -129,6 +129,9 @@ var Buckets = [][]byte{

feeRecipientBucket,
registrationBucket,

blobsBucket,
epochsForBlobSidecarsRequestBucket,
}

// NewKVStore initializes a new boltDB key-value store at the directory
@@ -199,6 +202,11 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}

if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
}

return kv, nil
}

@@ -46,6 +46,7 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")

// The keys below are used to identify objects that are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")

// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -70,4 +74,7 @@ var (

// Migrations
migrationsBucket = []byte("migrations")

// Stores how long to keep the blob sidecars for.
epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)

@@ -229,34 +229,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
pbState, err := getPhase0PbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
pbState.Validators = valEntries
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
pbState, err := statenative.ProtobufBeaconStateAltair(rawType)
pbState, err := getAltairPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -272,13 +266,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateBellatrix:
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawType)
pbState, err := getBellatrixPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -294,12 +285,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateCapella:
pbState, err := statenative.ProtobufBeaconStateCapella(rawType)
pbState, err := getCapellaPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateDeneb:
pbState, err := getDenebPbState(rawType)
if err != nil {
return err
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
@@ -323,6 +330,61 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}

func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

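The five helpers above share an identical "convert, check error, check nil" shape. The commit keeps one helper per fork, which is the simplest option; purely as an aside, under Go 1.18+ generics the same shape could also be written once, as in the hypothetical sketch below (the converter signature is assumed to take interface{} and return a typed pointer, as the statenative functions appear to do):

package kvsketch

import "errors"

// convertNonNil is a hypothetical generic variant of the per-fork helpers.
// It runs a conversion function and rejects both an error and a nil result.
func convertNonNil[T any](convert func(interface{}) (*T, error), raw interface{}) (*T, error) {
	pbState, err := convert(raw)
	if err != nil {
		return nil, err
	}
	if pbState == nil {
		return nil, errors.New("nil state")
	}
	return pbState, nil
}

// Usage would then look like: pbState, err := convertNonNil(statenative.ProtobufBeaconStateDeneb, rawType)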
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
@@ -472,6 +534,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}

switch {
case hasDenebKey(enc):
protoState := &ethpb.BeaconStateDeneb{}
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -579,6 +654,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}

@@ -900,6 +900,100 @@ func TestBellatrixState_CanDelete(t *testing.T) {
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestDenebState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))

require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())

savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestDenebState_CanDelete(t *testing.T) {
db := setupDB(t)

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))

require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))

require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
db := setupDB(t)

// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

stateValidators := validators(10)
st, _ := util.DeterministicGenesisStateDeneb(t, 20)
require.NoError(t, st.SetSlot(100))
require.NoError(t, st.SetValidators(stateValidators))

ctx := context.Background()
require.NoError(t, db.SaveState(ctx, st, r))
assert.Equal(t, true, db.HasState(context.Background(), r))

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
data := idxBkt.Get(r[:])
require.NotEqual(t, 0, len(data))
return nil
})
require.NoError(t, err)

// check if all the validator entries are still intact in the validator entry bucket.
err = db.db.Update(func(tx *bolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
// if any of the original validator entry is not present, then fail the test.
for _, val := range stateValidators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
data := valBkt.Get(hash[:])
require.NotNil(t, data)
require.NotEqual(t, 0, len(data))
}
return nil
})
require.NoError(t, err)
}

func BenchmarkState_CheckStateSaveTime_1(b *testing.B) { checkStateSaveTime(b, 1) }
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }

@@ -50,6 +50,7 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -58,6 +59,7 @@ const (
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
GetPayloadMethodV3 = "engine_getPayloadV3"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -96,11 +98,11 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
@@ -111,7 +113,7 @@ type EngineCaller interface {
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")

// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -143,6 +145,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
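engine_newPayloadV3 takes the blob versioned hashes alongside the payload, which is why NewPayload and the EngineCaller interface grow the extra [][32]byte parameter. Under EIP-4844 a versioned hash is derived from a KZG commitment by SHA-256 hashing it and overwriting the first byte with the version 0x01; a self-contained sketch of that conversion follows (the helper names are ours, not Prysm's):

package enginesketch

import "crypto/sha256"

const versionedHashVersionKzg = 0x01 // VERSIONED_HASH_VERSION_KZG from EIP-4844

// kzgCommitmentToVersionedHash converts a 48-byte KZG commitment into the
// 32-byte versioned hash expected by engine_newPayloadV3.
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = versionedHashVersionKzg
	return h
}

// versionedHashes maps every commitment in a block body to its versioned hash,
// producing the [][32]byte argument threaded through NewPayload above.
func versionedHashes(commitments [][]byte) [][32]byte {
	out := make([][32]byte, 0, len(commitments))
	for _, c := range commitments {
		out = append(out, kzgCommitmentToVersionedHash(c))
	}
	return out
}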
@@ -190,7 +201,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella:
case version.Capella, version.Deneb:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -220,7 +231,8 @@ func (s *Service) ForkchoiceUpdated(
}

// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error) {
// It returns the execution data as well as the blobs bundle.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
defer span.End()
start := time.Now()
@@ -232,23 +244,44 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()

if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
}
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, math.WeiToGwei(v))
if err != nil {
return nil, nil, err
}
return ed, result.BlobsBundle, nil
}

if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, handleRPCError(err)
}

v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
return blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}

result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, handleRPCError(err)
}
return blocks.WrappedExecutionPayload(result)
ed, err := blocks.WrappedExecutionPayload(result)
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}

// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
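GetPayload now picks the engine method from the epoch of the requested slot: V3 from the Deneb fork epoch, V2 from Capella, and V1 otherwise. The dispatch reduces to a small pure function, sketched here with illustrative fork epochs (the real values come from params.BeaconConfig()):

package enginesketch

// Illustrative values only; not the mainnet configuration.
const (
	capellaForkEpoch = 1
	denebForkEpoch   = 2
	slotsPerEpoch    = 32
)

// getPayloadMethodForSlot mirrors the branching in GetPayload above:
// the epoch of the requested slot decides which engine_getPayloadVX to call.
func getPayloadMethodForSlot(slot uint64) string {
	epoch := slot / slotsPerEpoch
	switch {
	case epoch >= denebForkEpoch:
		return "engine_getPayloadV3"
	case epoch >= capellaForkEpoch:
		return "engine_getPayloadV2"
	default:
		return "engine_getPayloadV1"
	}
}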
@@ -684,7 +717,8 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}

if block.Version == version.Bellatrix {
switch block.Version {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -701,24 +735,51 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
edg, err := header.ExcessDataGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessDataGas attribute from execution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
ExcessDataGas: edg,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}

// Handles errors received from the RPC server according to the specification.

@@ -75,7 +75,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, 1)
resp, _, err := srv.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
resPb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
resPb, err := resp.PbCapella()
require.NoError(t, err)
@@ -118,7 +118,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -129,7 +129,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -163,6 +163,7 @@ func TestClient_HTTP(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
params.OverrideBeaconConfig(cfg)

t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -203,7 +204,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, 1)
resp, _, err := client.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
pb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -247,7 +248,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
pb, err := resp.PbCapella()
require.NoError(t, err)
@@ -262,6 +263,60 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1236), v)
})
t.Run(GetPayloadMethodV3, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadDenebWithValue"].(*pb.GetPayloadV3ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Service{}
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, blobsBundle, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
g, err := resp.ExcessDataGas()
require.NoError(t, err)
require.DeepEqual(t, uint64(3), g)
g, err = resp.DataGasUsed()
require.NoError(t, err)
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, blobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, blobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, blobsBundle.Blobs)
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -412,7 +467,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -426,7 +481,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -440,7 +509,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -454,7 +523,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -468,7 +551,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -482,7 +565,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -496,7 +593,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -510,7 +607,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -524,7 +635,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1306,6 +1417,25 @@ func fixtures() map[string]interface{} {
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
executionPayloadFixtureDeneb := &pb.ExecutionPayloadDeneb{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
DataGasUsed: 2,
ExcessDataGas: 3,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
@@ -1326,6 +1456,34 @@ func fixtures() map[string]interface{} {
},
BlockValue: "0x11fffffffff",
}
dgu := hexutil.Uint64(2)
edg := hexutil.Uint64(3)
executionPayloadWithValueFixtureDeneb := &pb.GetPayloadV3ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
DataGasUsed: &dgu,
ExcessDataGas: &edg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
Commitments: [][48]byte{bytesutil.ToBytes48([]byte("commitment1")), bytesutil.ToBytes48([]byte("commitment2"))},
Proofs: [][48]byte{bytesutil.ToBytes48([]byte("proof1")), bytesutil.ToBytes48([]byte("proof2"))},
Blobs: [][]byte{{'a'}, {'b'}},
},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1421,7 +1579,9 @@ func fixtures() map[string]interface{} {
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
"ExecutionPayloadDenebWithValue": executionPayloadWithValueFixtureDeneb,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
@@ -1496,6 +1656,126 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
}
}

func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}

func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}

func TestHeaderByHash_NotFound(t *testing.T) {
srv := &Service{}
srv.rpcClient = RPCClientBad{}
@@ -1819,6 +2099,40 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
return service
}

func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadDeneb) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(payload)
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": status,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)

service := &Service{}
service.rpcClient = rpcClient
return service
}

func TestCapella_PayloadBodiesByHash(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,

@@ -25,6 +25,7 @@ type EngineClient struct {
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
@@ -39,10 +40,11 @@ type EngineClient struct {
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlockValue uint64
BlobsBundle *pb.BlobsBundle
}

// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ [][32]byte) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}

@@ -57,15 +59,26 @@ func (e *EngineClient) ForkchoiceUpdated(
}

// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, error) {
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, e.BlockValue)
if err != nil {
return nil, nil, err
}
return ed, e.BlobsBundle, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, err
return nil, nil, err
}
return p, e.ErrGetPayload
return p, nil, e.ErrGetPayload
}

// ExchangeTransitionConfiguration --

@@ -202,6 +202,67 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
}
}

// BroadcastBlob broadcasts a blob to the p2p network; the message is assumed to be
// broadcast for the current fork and to the input subnet.
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
if blob == nil {
return errors.New("attempted to broadcast nil blob sidecar")
}
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)

return nil
}

func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()

wrappedSubIdx := subnet + blobSubnetLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()

if !hasPeer {
blobSidecarCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
}
if ok {
blobSidecarCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}

if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
}

// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -238,3 +299,7 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

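blobSubnetToTopic follows the same fork-digest plus subnet-index formatting as the attestation and sync-committee topics. The standalone sketch below illustrates the shape of the resulting topic string; the format constant here is an assumption about what BlobSubnetTopicFormat looks like, not its actual value in the repository.

package p2psketch

import "fmt"

// Assumed shape of BlobSubnetTopicFormat; the real constant lives in the p2p package.
const blobSubnetTopicFormat = "/eth2/%x/blob_sidecar_%d"

// blobSubnetToTopic builds the gossip topic for a blob sidecar subnet from the
// current fork digest and the subnet index.
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
	return fmt.Sprintf(blobSubnetTopicFormat, forkDigest, subnet)
}

// Example: blobSubnetToTopic(1, [4]byte{0xab, 0xcd, 0xef, 0x01}) yields
// "/eth2/abcdef01/blob_sidecar_1" before the encoding suffix is appended.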
@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -447,3 +448,76 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
t.Error("Failed to receive pubsub within 1s")
}
}

func TestService_BroadcastBlob(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}

blobSidecar := &ethpb.SignedBlobSidecar{
Message: &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
Index: 1,
Slot: 2,
BlockParentRoot: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
ProposerIndex: 3,
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
},
Signature: bytesutil.PadTo([]byte{'F'}, fieldparams.BLSSignatureLength),
}
subnet := uint64(0)

topic := BlobSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(blobSidecar)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)

result := &ethpb.SignedBlobSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
require.DeepEqual(t, result, blobSidecar)
}(t)

// Attempt to broadcast nil object should fail.
ctx := context.Background()
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastBlob(ctx, subnet, nil))

// Broadcast to peers and wait.
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

@@ -21,6 +21,7 @@ var gossipTopicMappings = map[string]proto.Message{
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.SignedBlobSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type

@@ -35,6 +35,7 @@ type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.

@@ -60,10 +60,18 @@ var (
|
||||
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
|
||||
"until a subnet peer can be found.",
|
||||
})
|
||||
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_broadcasts",
|
||||
Help: "The number of blob sidecar messages that were broadcast with no peers on the subnet.",
|
||||
})
|
||||
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
|
||||
Help: "The number of sync committee messages that were attempted to be broadcast.",
|
||||
})
|
||||
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
|
||||
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
|
||||
})
|
||||
|
||||
// Gossip Tracer Metrics
|
||||
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
|
||||
@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||
return false
|
||||
}
|
||||
|
||||
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Deneb fork digest")
|
||||
return false
|
||||
}
|
||||
switch parts[2] {
|
||||
case fmt.Sprintf("%x", phase0ForkDigest):
|
||||
case fmt.Sprintf("%x", altairForkDigest):
|
||||
case fmt.Sprintf("%x", bellatrixForkDigest):
|
||||
case fmt.Sprintf("%x", capellaForkDigest):
|
||||
case fmt.Sprintf("%x", denebForkDigest):
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
formatting := []interface{}{digest}
|
||||
|
||||
// Special case for attestation subnets which have a second formatting placeholder.
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
|
||||
formatting = append(formatting, 0 /* some subnet ID */)
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,13 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
// chosen as more than 64(attestation subnet count).
|
||||
const syncLockerVal = 100
|
||||
|
||||
// The value used with the blob sidecar subnet in order
// to create an appropriate key to retrieve the relevant lock.
// This is used to differentiate blob subnets from others, and is
// deliberately chosen to be higher than the key ranges used for the
// attestation and sync committee subnets combined.
const blobSubnetLockerVal = 110
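
// Illustrative sketch only (assumed wiring, not part of this diff): the offset
// keeps blob subnet lock keys from colliding with attestation (0..63) and sync
// committee (syncLockerVal+) keys. subnetLocker is an assumed helper returning
// the *sync.RWMutex registered for a given key.
//
//    func (s *Service) blobSubnetLocker(subnet uint64) *sync.RWMutex {
//        return s.subnetLocker(subnet + blobSubnetLockerVal)
//    }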
|
||||
|
||||
// FindPeersWithSubnet performs a network search for peers
|
||||
// subscribed to a particular subnet. Then we try to connect
|
||||
// with those peers. This method will block until the required amount of
|
||||
|
||||
@@ -143,6 +143,11 @@ func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob -- fake.
|
||||
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InterceptPeerDial -- fake.
|
||||
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
|
||||
@@ -33,3 +33,9 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
|
||||
m.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
m.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -176,6 +176,12 @@ func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
p.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStreamHandler for RPC.
|
||||
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
|
||||
p.BHost.SetStreamHandler(protocol.ID(topic), handler)
|
||||
|
||||
@@ -28,7 +28,8 @@ const (
|
||||
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
|
||||
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
|
||||
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
|
||||
|
||||
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
|
||||
GossipBlobSidecarMessage = "blob_sidecar"
|
||||
// Topic Formats
|
||||
//
|
||||
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
|
||||
@@ -49,4 +50,6 @@ const (
|
||||
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
||||
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
||||
// BlobSubnetTopicFormat is the topic format for the blob subnet.
|
||||
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
|
||||
)
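
// Illustrative only (not part of this diff): a concrete blob subnet topic is
// built by formatting this constant with the current fork digest and a subnet
// index, then appending the gossip encoding suffix, as the broadcast test
// earlier in this change does:
//
//    topic := fmt.Sprintf(BlobSubnetTopicFormat, digest, subnet) // e.g. "/eth2/<fork-digest>/blob_sidecar_3"
//    topic += p.Encoding().ProtocolSuffix()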
|
||||
|
||||
@@ -51,6 +51,8 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BellatrixForkEpoch = 101
|
||||
config.CapellaForkVersion = []byte("CapellaForkVersion")
|
||||
config.CapellaForkEpoch = 103
|
||||
config.DenebForkVersion = []byte("DenebForkVersion")
|
||||
config.DenebForkEpoch = 105
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
||||
config.GenesisDelay = 24
|
||||
@@ -129,6 +131,9 @@ func TestGetSpec(t *testing.T) {
|
||||
var dam [4]byte
|
||||
copy(dam[:], []byte{'1', '0', '0', '0'})
|
||||
config.DomainApplicationMask = dam
|
||||
var dbs [4]byte
|
||||
copy(dam[:], []byte{'2', '0', '0', '0'})
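// Note: the copy above writes into dam rather than dbs, so dbs (and therefore
// DomainBlobSidecar) stays zeroed; the DOMAIN_BLOB_SIDECAR case later in this
// test asserts "0x00000000" accordingly.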
|
||||
config.DomainBlobSidecar = dbs
|
||||
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
@@ -136,7 +141,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 108, len(resp.Data))
|
||||
assert.Equal(t, 111, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -205,6 +210,10 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
|
||||
case "CAPELLA_FORK_EPOCH":
|
||||
assert.Equal(t, "103", v)
|
||||
case "DENEB_FORK_VERSION":
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
|
||||
case "DENEB_FORK_EPOCH":
|
||||
assert.Equal(t, "105", v)
|
||||
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
|
||||
assert.Equal(t, "1000", v)
|
||||
case "BLS_WITHDRAWAL_PREFIX":
|
||||
@@ -269,6 +278,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "51", v)
|
||||
case "MAX_VOLUNTARY_EXITS":
|
||||
assert.Equal(t, "52", v)
|
||||
case "MAX_BLOBS_PER_BLOCK":
|
||||
assert.Equal(t, "4", v)
|
||||
case "TIMELY_HEAD_FLAG_INDEX":
|
||||
assert.Equal(t, "0x35", v)
|
||||
case "TIMELY_SOURCE_FLAG_INDEX":
|
||||
@@ -335,6 +346,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x0a000000", v)
|
||||
case "DOMAIN_APPLICATION_BUILDER":
|
||||
assert.Equal(t, "0x00000001", v)
|
||||
case "DOMAIN_BLOB_SIDECAR":
|
||||
assert.Equal(t, "0x00000000", v)
|
||||
case "TRANSITION_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "0", v)
|
||||
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
|
||||
|
||||
@@ -104,6 +104,18 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: isFinalized,
|
||||
}, nil
|
||||
case version.Deneb:
|
||||
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
|
||||
}
|
||||
return ðpbv2.BeaconStateResponseV2{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.BeaconStateContainer{
|
||||
State: ðpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||
}
|
||||
@@ -133,6 +145,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
|
||||
ver = ethpbv2.Version_BELLATRIX
|
||||
case version.Capella:
|
||||
ver = ethpbv2.Version_CAPELLA
|
||||
case version.Deneb:
|
||||
ver = ethpbv2.Version_DENEB
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||
}
|
||||
|
||||
@@ -97,6 +97,24 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
server := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
OptimisticModeFetcher: &blockchainmock.ChainService{},
|
||||
FinalizationFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetBeaconStateV2(context.Background(), ðpbv2.BeaconStateRequestV2{
|
||||
StateId: []byte("head"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
parentRoot := [32]byte{'a'}
|
||||
blk := util.NewBeaconBlock()
|
||||
@@ -256,6 +274,25 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
assert.DeepEqual(t, sszState, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
sszState, err := fakeState.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
resp, err := server.GetBeaconStateSSZV2(context.Background(), ðpbv2.BeaconStateRequestV2{
|
||||
StateId: make([]byte, 0),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
|
||||
assert.DeepEqual(t, sszState, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListForkChoiceHeadsV2(t *testing.T) {
|
||||
|
||||
@@ -317,7 +317,7 @@ func (s *Server) streamPayloadAttributes(stream ethpbservice.Events_StreamEvents
|
||||
},
|
||||
},
|
||||
})
|
||||
case version.Capella:
|
||||
case version.Capella, version.Deneb:
|
||||
withdrawals, err := headState.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"proposer_bellatrix.go",
|
||||
"proposer_builder.go",
|
||||
"proposer_capella.go",
|
||||
"proposer_deneb.go",
|
||||
"proposer_deposits.go",
|
||||
"proposer_empty_block.go",
|
||||
"proposer_eth1data.go",
|
||||
@@ -184,6 +185,7 @@ go_test(
|
||||
"proposer_attestations_test.go",
|
||||
"proposer_bellatrix_test.go",
|
||||
"proposer_builder_test.go",
|
||||
"proposer_deneb_test.go",
|
||||
"proposer_deposits_test.go",
|
||||
"proposer_empty_block_test.go",
|
||||
"proposer_execution_payload_test.go",
|
||||
|
||||
@@ -100,6 +100,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
|
||||
return nil
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
|
||||
case version.Deneb:
|
||||
pb, err := data.SignedBlock.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get protobuf block")
|
||||
}
|
||||
phBlk, ok := pb.(*ethpb.SignedBeaconBlockDeneb)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockDeneb")
|
||||
return nil
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: phBlk}
|
||||
}
|
||||
|
||||
if err := stream.Send(b); err != nil {
|
||||
@@ -149,6 +160,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
|
||||
b.Block = ðpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
|
||||
case *ethpb.SignedBeaconBlockCapella:
|
||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
|
||||
case *ethpb.SignedBeaconBlockDeneb:
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
|
||||
default:
|
||||
log.Errorf("Unknown block type %T", p)
|
||||
}
|
||||
|
||||
@@ -19,14 +19,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -105,95 +105,6 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
|
||||
if features.Get().BuildBlockParallel {
|
||||
if err := vs.BuildBlockParallel(ctx, sBlk, head); err != nil {
|
||||
return nil, errors.Wrap(err, "could not build block in parallel")
|
||||
}
|
||||
} else {
|
||||
// Set eth1 data.
|
||||
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
|
||||
if err != nil {
|
||||
eth1Data = ðpb.Eth1Data{DepositRoot: params.BeaconConfig().ZeroHash[:], BlockHash: params.BeaconConfig().ZeroHash[:]}
|
||||
log.WithError(err).Error("Could not get eth1data")
|
||||
}
|
||||
sBlk.SetEth1Data(eth1Data)
|
||||
|
||||
// Set deposit and attestation.
|
||||
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
|
||||
if err != nil {
|
||||
sBlk.SetDeposits([]*ethpb.Deposit{})
|
||||
sBlk.SetAttestations([]*ethpb.Attestation{})
|
||||
log.WithError(err).Error("Could not pack deposits and attestations")
|
||||
} else {
|
||||
sBlk.SetDeposits(deposits)
|
||||
sBlk.SetAttestations(atts)
|
||||
}
|
||||
|
||||
// Set slashings.
|
||||
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
|
||||
sBlk.SetProposerSlashings(validProposerSlashings)
|
||||
sBlk.SetAttesterSlashings(validAttSlashings)
|
||||
|
||||
// Set exits.
|
||||
sBlk.SetVoluntaryExits(vs.getExits(head, req.Slot))
|
||||
|
||||
// Set sync aggregate. New in Altair.
|
||||
vs.setSyncAggregate(ctx, sBlk)
|
||||
|
||||
// Get local and builder (if enabled) payloads. Set execution data. New in Bellatrix.
|
||||
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get local payload: %v", err)
|
||||
}
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex())
|
||||
if err != nil {
|
||||
builderGetPayloadMissCount.Inc()
|
||||
log.WithError(err).Error("Could not get builder payload")
|
||||
}
|
||||
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
}
|
||||
|
||||
// Set bls to execution change. New in Capella.
|
||||
vs.setBlsToExecData(sBlk, head)
|
||||
}
|
||||
|
||||
sr, err := vs.computeStateRoot(ctx, sBlk)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||
}
|
||||
sBlk.SetStateRoot(sr)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
}).Info("Finished building block")
|
||||
|
||||
pb, err := sBlk.Block().Proto()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
if sBlk.IsBlinded() {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}}, nil
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if sBlk.IsBlinded() {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}}, nil
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: pb.(*ethpb.BeaconBlockAltair)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
|
||||
}
|
||||
|
||||
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState) error {
|
||||
// Build consensus fields in background
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
@@ -233,9 +144,9 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
vs.setBlsToExecData(sBlk, head)
|
||||
}()
|
||||
|
||||
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
|
||||
localPayload, blobsBundle, err := vs.getLocalPayloadAndBlobs(ctx, sBlk.Block(), head)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not get local payload: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not get local payload: %v", err)
|
||||
}
|
||||
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex())
|
||||
@@ -245,12 +156,60 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
}
|
||||
|
||||
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
}
|
||||
|
||||
if err := setKzgCommitments(sBlk, blobsBundle); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set kzg commitment: %v", err)
|
||||
}
|
||||
|
||||
wg.Wait() // Wait until block is built via consensus and execution fields.
|
||||
|
||||
return nil
|
||||
sr, err := vs.computeStateRoot(ctx, sBlk)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||
}
|
||||
sBlk.SetStateRoot(sr)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
}).Info("Finished building block")
|
||||
|
||||
pb, err := sBlk.Block().Proto()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||
// TODO: Handle blind case
|
||||
scs, err := blobsBundleToSidecars(blobsBundle, sBlk.Block())
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert blobs bundle to sidecar: %v", err)
|
||||
}
|
||||
blockAndBlobs := ðpb.BeaconBlockAndBlobsDeneb{
|
||||
Block: pb.(*ethpb.BeaconBlockDeneb),
|
||||
Blobs: scs,
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Deneb{Deneb: blockAndBlobs}}, nil
|
||||
}
|
||||
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
if sBlk.IsBlinded() {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}}, nil
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if sBlk.IsBlinded() {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}}, nil
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: pb.(*ethpb.BeaconBlockAltair)}}, nil
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
|
||||
}
|
||||
|
||||
// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
|
||||
@@ -258,11 +217,77 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(req.Block)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", CouldNotDecodeBlock, err)
|
||||
}
|
||||
return vs.proposeGenericBeaconBlock(ctx, blk)
|
||||
|
||||
unblinder, err := newUnblinder(blk, vs.BlockBuilder)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create unblinder")
|
||||
}
|
||||
blk, err = unblinder.unblindBuilderBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not unblind builder block")
|
||||
}
|
||||
|
||||
// Broadcast the new block to the network.
|
||||
blkPb, err := blk.Proto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get protobuf block")
|
||||
}
|
||||
if err := vs.P2P.Broadcast(ctx, blkPb); err != nil {
|
||||
return nil, fmt.Errorf("could not broadcast block: %v", err)
|
||||
}
|
||||
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not tree hash block: %v", err)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(root[:]),
|
||||
}).Debug("Broadcasting block")
|
||||
|
||||
if blk.Version() >= version.Deneb {
|
||||
b, ok := req.GetBlock().(*ethpb.GenericSignedBeaconBlock_Deneb)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Internal, "Could not cast block to Deneb")
|
||||
}
|
||||
if len(b.Deneb.Blobs) > fieldparams.MaxBlobsPerBlock {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Too many blobs in block: %d", len(b.Deneb.Blobs))
|
||||
}
|
||||
scs := make([]*ethpb.BlobSidecar, len(b.Deneb.Blobs))
|
||||
for i, blob := range b.Deneb.Blobs {
|
||||
if err := vs.P2P.BroadcastBlob(ctx, blob.Message.Index, blob); err != nil {
|
||||
log.WithError(err).Errorf("Could not broadcast blob index %d / %d", i, len(b.Deneb.Blobs))
|
||||
}
|
||||
scs[i] = blob.Message
|
||||
}
|
||||
if len(scs) > 0 {
|
||||
if err := vs.BeaconDB.SaveBlobSidecar(ctx, scs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Do not block proposal critical path with debug logging or block feed updates.
|
||||
defer func() {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
|
||||
"Block proposal received via RPC")
|
||||
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
|
||||
})
|
||||
}()
|
||||
|
||||
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
|
||||
return nil, fmt.Errorf("could not process beacon block: %v", err)
|
||||
}
|
||||
|
||||
return ðpb.ProposeResponse{
|
||||
BlockRoot: root[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
@@ -344,54 +369,6 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
|
||||
defer span.End()
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not tree hash block: %v", err)
|
||||
}
|
||||
|
||||
unblinder, err := newUnblinder(blk, vs.BlockBuilder)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create unblinder")
|
||||
}
|
||||
blk, err = unblinder.unblindBuilderBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not unblind builder block")
|
||||
}
|
||||
|
||||
// Do not block proposal critical path with debug logging or block feed updates.
|
||||
defer func() {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
|
||||
"Block proposal received via RPC")
|
||||
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
|
||||
})
|
||||
}()
|
||||
|
||||
// Broadcast the new block to the network.
|
||||
blkPb, err := blk.Proto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get protobuf block")
|
||||
}
|
||||
if err := vs.P2P.Broadcast(ctx, blkPb); err != nil {
|
||||
return nil, fmt.Errorf("could not broadcast block: %v", err)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(root[:]),
|
||||
}).Debug("Broadcasting block")
|
||||
|
||||
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
|
||||
return nil, fmt.Errorf("could not process beacon block: %v", err)
|
||||
}
|
||||
|
||||
return ðpb.ProposeResponse{
|
||||
BlockRoot: root[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// computeStateRoot computes the state root after a block has been processed through a state transition and
|
||||
// returns it to the validator client.
|
||||
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
|
||||
@@ -40,6 +40,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
cfg.CapellaForkEpoch = 0
|
||||
cfg.DenebForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
@@ -78,7 +79,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
b := blk.Block()
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.NoError(t, err)
|
||||
@@ -137,7 +138,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
vs.HeadFetcher = chain
|
||||
b := blk.Block()
|
||||
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.NoError(t, err)
|
||||
@@ -199,7 +200,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
vs.HeadFetcher = chain
|
||||
|
||||
b := blk.Block()
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.NoError(t, err)
|
||||
@@ -213,7 +214,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2}
|
||||
b := blk.Block()
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.NoError(t, err)
|
||||
@@ -233,7 +234,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1}
|
||||
b := blk.Block()
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.NoError(t, err)
|
||||
@@ -254,7 +255,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
}
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: 0}
|
||||
b := blk.Block()
|
||||
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
localPayload, _, err := vs.getLocalPayloadAndBlobs(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
|
||||
require.ErrorIs(t, consensus_types.ErrNilObjectWrapped, err) // Builder returns fault. Use local block
|
||||
@@ -263,6 +264,28 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
|
||||
})
|
||||
t.Run("Can get payload and blobs Deneb", func(t *testing.T) {
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockDeneb())
|
||||
require.NoError(t, err)
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{
|
||||
HasConfigured: false,
|
||||
}
|
||||
blobsBundle := &v1.BlobsBundle{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{
|
||||
PayloadIDBytes: id,
|
||||
BlobsBundle: blobsBundle,
|
||||
ExecutionPayloadDeneb: &v1.ExecutionPayloadDeneb{BlockNumber: 4},
|
||||
BlockValue: 0}
|
||||
blk.SetSlot(primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
|
||||
localPayload, bb, err := vs.getLocalPayloadAndBlobs(ctx, blk.Block(), capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(4), localPayload.BlockNumber())
|
||||
require.DeepEqual(t, bb, blobsBundle)
|
||||
})
|
||||
}
|
||||
func TestServer_getPayloadHeader(t *testing.T) {
|
||||
genesis := time.Now().Add(-time.Duration(params.BeaconConfig().SlotsPerEpoch) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
|
||||
48
beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deneb.go
Normal file
@@ -0,0 +1,48 @@
package validator

import (
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/runtime/version"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

// setKzgCommitments sets the KZG commitments on the block.
// It returns early if the block version is older than Deneb or the block slot
// has not reached the Deneb fork epoch.
func setKzgCommitments(blk interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) error {
    if blk.Version() < version.Deneb {
        return nil
    }
    slot := blk.Block().Slot()
    if slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch {
        return nil
    }
    return blk.SetBlobKzgCommitments(bundle.KzgCommitments)
}

// blobsBundleToSidecars converts a blobs bundle into blob sidecars for the given block.
func blobsBundleToSidecars(bundle *enginev1.BlobsBundle, blk interfaces.ReadOnlyBeaconBlock) ([]*ethpb.BlobSidecar, error) {
    r, err := blk.HashTreeRoot()
    if err != nil {
        return nil, err
    }
    pr := blk.ParentRoot()

    sidecars := make([]*ethpb.BlobSidecar, len(bundle.Blobs))
    for i := 0; i < len(bundle.Blobs); i++ {
        sidecars[i] = &ethpb.BlobSidecar{
            BlockRoot:       r[:],
            Index:           uint64(i),
            Slot:            blk.Slot(),
            BlockParentRoot: pr[:],
            ProposerIndex:   blk.ProposerIndex(),
            Blob:            bundle.Blobs[i],
            KzgCommitment:   bundle.KzgCommitments[i],
            KzgProof:        bundle.Proofs[i],
        }
    }

    return sidecars, nil
}
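
// Illustrative summary only (not part of this diff) of how these helpers are
// used on the proposal path introduced above: the engine returns a payload
// plus a blobs bundle, the KZG commitments are set on the block, and the
// bundle is converted into sidecars that accompany the Deneb block response.
// Error handling is trimmed; all names follow the surrounding diff.
//
//    localPayload, blobsBundle, err := vs.getLocalPayloadAndBlobs(ctx, sBlk.Block(), head)
//    _ = setExecutionData(ctx, sBlk, localPayload, builderPayload)
//    _ = setKzgCommitments(sBlk, blobsBundle)
//    scs, _ := blobsBundleToSidecars(blobsBundle, sBlk.Block())
//    blockAndBlobs := &ethpb.BeaconBlockAndBlobsDeneb{Block: pb.(*ethpb.BeaconBlockDeneb), Blobs: scs}
//    return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: blockAndBlobs}}, nil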
@@ -0,0 +1,64 @@
package validator

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
)

func Test_setKzgCommitments(t *testing.T) {
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    require.NoError(t, setKzgCommitments(b, nil))
    b, err = blocks.NewSignedBeaconBlock(util.NewBeaconBlockDeneb())
    require.NoError(t, err)
    require.NoError(t, setKzgCommitments(b, nil))

    cfg := params.BeaconConfig().Copy()
    cfg.DenebForkEpoch = 0
    params.OverrideBeaconConfig(cfg)

    kcs := [][]byte{[]byte("kzg"), []byte("kzg1"), []byte("kzg2")}
    bundle := &enginev1.BlobsBundle{KzgCommitments: kcs}
    require.NoError(t, setKzgCommitments(b, bundle))
    got, err := b.Block().Body().BlobKzgCommitments()
    require.NoError(t, err)
    require.DeepEqual(t, got, kcs)
}

func Test_blobsBundleToSidecars(t *testing.T) {
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockDeneb())
    require.NoError(t, err)

    b.SetSlot(1)
    b.SetProposerIndex(2)
    b.SetParentRoot(bytesutil.PadTo([]byte("parentRoot"), 32))

    kcs := [][]byte{[]byte("kzg"), []byte("kzg1"), []byte("kzg2")}
    proofs := [][]byte{[]byte("proof"), []byte("proof1"), []byte("proof2")}
    blobs := [][]byte{[]byte("blob"), []byte("blob1"), []byte("blob2")}
    bundle := &enginev1.BlobsBundle{KzgCommitments: kcs, Proofs: proofs, Blobs: blobs}

    sidecars, err := blobsBundleToSidecars(bundle, b.Block())
    require.NoError(t, err)

    r, err := b.Block().HashTreeRoot()
    require.NoError(t, err)
    require.Equal(t, len(sidecars), 3)
    for i := 0; i < len(sidecars); i++ {
        require.DeepEqual(t, sidecars[i].BlockRoot, r[:])
        require.Equal(t, sidecars[i].Index, uint64(i))
        require.Equal(t, sidecars[i].Slot, b.Block().Slot())
        pr := b.Block().ParentRoot()
        require.DeepEqual(t, sidecars[i].BlockParentRoot, pr[:])
        require.Equal(t, sidecars[i].ProposerIndex, b.Block().ProposerIndex())
        require.DeepEqual(t, sidecars[i].Blob, blobs[i])
        require.DeepEqual(t, sidecars[i].KzgProof, proofs[i])
        require.DeepEqual(t, sidecars[i].KzgCommitment, kcs[i])
    }
}
@@ -30,11 +30,16 @@ func getEmptyBlock(slot primitives.Slot) (interfaces.SignedBeaconBlock, error) {
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
default:
|
||||
case slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
default:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
}
|
||||
return sBlk, err
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ func Test_getEmptyBlock(t *testing.T) {
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
config.CapellaForkEpoch = 3
|
||||
config.DenebForkEpoch = 4
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
tests := []struct {
|
||||
@@ -51,6 +52,15 @@ func Test_getEmptyBlock(t *testing.T) {
|
||||
return b
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb",
|
||||
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
b, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -41,14 +41,14 @@ var (
|
||||
})
|
||||
)
|
||||
|
||||
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
|
||||
// The payload is computed given the respected time of merge.
|
||||
func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) (interfaces.ExecutionData, error) {
|
||||
// This returns the local execution payload of a given slot. The function has full awareness of pre and post merge.
|
||||
// It also returns the blobs bundle.
|
||||
func (vs *Server) getLocalPayloadAndBlobs(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) (interfaces.ExecutionData, *enginev1.BlobsBundle, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.getLocalPayload")
|
||||
defer span.End()
|
||||
|
||||
if blk.Version() < version.Bellatrix {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
slot := blk.Slot()
|
||||
@@ -73,21 +73,21 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
"Please refer to our documentation for instructions")
|
||||
}
|
||||
default:
|
||||
return nil, errors.Wrap(err, "could not get fee recipient in db")
|
||||
return nil, nil, errors.Wrap(err, "could not get fee recipient in db")
|
||||
}
|
||||
|
||||
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
|
||||
var pid [8]byte
|
||||
copy(pid[:], payloadId[:])
|
||||
payloadIDCacheHit.Inc()
|
||||
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
|
||||
payload, blobsBundle, err := vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
|
||||
switch {
|
||||
case err == nil:
|
||||
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
||||
return payload, nil
|
||||
return payload, blobsBundle, nil
|
||||
case errors.Is(err, context.DeadlineExceeded):
|
||||
default:
|
||||
return nil, errors.Wrap(err, "could not get cached payload from execution client")
|
||||
return nil, nil, errors.Wrap(err, "could not get cached payload from execution client")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,36 +95,44 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
var hasTerminalBlock bool
|
||||
mergeComplete, err := blocks.IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
t, err := slots.ToTime(st.GenesisTime(), slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if mergeComplete {
|
||||
header, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
parentHash = header.BlockHash()
|
||||
} else {
|
||||
if activationEpochNotReached(slot) {
|
||||
return consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return p, nil, nil
|
||||
}
|
||||
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if !hasTerminalBlock {
|
||||
return consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return p, nil, nil
|
||||
}
|
||||
}
|
||||
payloadIDCacheMiss.Inc()
|
||||
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
finalizedBlockHash := [32]byte{}
|
||||
@@ -142,10 +150,10 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
}
|
||||
var attr payloadattribute.Attributer
|
||||
switch st.Version() {
|
||||
case version.Capella:
|
||||
case version.Capella, version.Deneb:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
@@ -154,7 +162,7 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
Withdrawals: withdrawals,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
case version.Bellatrix:
|
||||
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
|
||||
@@ -163,25 +171,25 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("unknown beacon state version")
|
||||
return nil, nil, errors.New("unknown beacon state version")
|
||||
}
|
||||
|
||||
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, attr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not prepare payload")
|
||||
return nil, nil, errors.Wrap(err, "could not prepare payload")
|
||||
}
|
||||
if payloadID == nil {
|
||||
return nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
|
||||
return nil, nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
|
||||
}
|
||||
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
|
||||
payload, blobsBundle, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
||||
return payload, nil
|
||||
return payload, blobsBundle, nil
|
||||
}
|
||||
|
||||
// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
|
||||
|
||||
@@ -149,7 +149,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
||||
blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
_, err = vs.getLocalPayload(context.Background(), b.Block(), tt.st)
|
||||
_, _, err = vs.getLocalPayloadAndBlobs(context.Background(), b.Block(), tt.st)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
@@ -191,7 +191,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
|
||||
blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
_, err = vs.getLocalPayload(context.Background(), b.Block(), nonTransitionSt)
|
||||
_, _, err = vs.getLocalPayloadAndBlobs(context.Background(), b.Block(), nonTransitionSt)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -243,7 +243,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
||||
blk.Block.ParentRoot = bytesutil.PadTo([]byte{}, 32)
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
gotPayload, err := vs.getLocalPayload(context.Background(), b.Block(), transitionSt)
|
||||
gotPayload, _, err := vs.getLocalPayloadAndBlobs(context.Background(), b.Block(), transitionSt)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, gotPayload)
|
||||
|
||||
@@ -255,7 +255,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
||||
payload.FeeRecipient = evilRecipientAddress[:]
|
||||
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
|
||||
gotPayload, err = vs.getLocalPayload(context.Background(), b.Block(), transitionSt)
|
||||
gotPayload, _, err = vs.getLocalPayloadAndBlobs(context.Background(), b.Block(), transitionSt)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, gotPayload)
|
||||
|
||||
|
||||
@@ -430,6 +430,126 @@ func TestServer_GetBeaconBlock_Capella(t *testing.T) {
|
||||
require.DeepEqual(t, change, got.GetCapella().Body.BlsToExecutionChanges[0])
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Deneb(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
transition.SkipSlotCache.Disable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{
|
||||
Slot: denebSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: ðpb.BeaconBlockBodyDeneb{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: ðpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
require.NoError(t, err)
|
||||
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), denebSlot+1)
|
||||
require.NoError(t, err)
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
DataGasUsed: 4,
|
||||
ExcessDataGas: 5,
|
||||
}
|
||||
|
||||
kc := make([][]byte, 0)
|
||||
kc = append(kc, bytesutil.PadTo([]byte("kc"), 48))
|
||||
kc = append(kc, bytesutil.PadTo([]byte("kc1"), 48))
|
||||
kc = append(kc, bytesutil.PadTo([]byte("kc2"), 48))
|
||||
proofs := [][]byte{[]byte("proof"), []byte("proof1"), []byte("proof2")}
|
||||
blobs := [][]byte{[]byte("blob"), []byte("blob1"), []byte("blob2")}
|
||||
bundle := &enginev1.BlobsBundle{KzgCommitments: kc, Proofs: proofs, Blobs: blobs}
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
ExecutionPayloadDeneb: payload,
|
||||
BlobsBundle: bundle,
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := ðpb.BlockRequest{
|
||||
Slot: denebSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
copiedState := beaconState.Copy()
|
||||
copiedState, err = transition.ProcessSlots(ctx, copiedState, denebSlot+1)
|
||||
require.NoError(t, err)
|
||||
change, err := util.GenerateBLSToExecutionChange(copiedState, privKeys[1], 0)
|
||||
require.NoError(t, err)
|
||||
proposerServer.BLSChangesPool.InsertBLSToExecChange(change)
|
||||
|
||||
got, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, got.GetDeneb().Block.Body.BlobKzgCommitments, kc)
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -511,6 +631,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
block func([32]byte) *ethpb.GenericSignedBeaconBlock
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "phase0",
|
||||
@@ -558,6 +679,68 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bellatrix",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBeaconBlockBellatrix()
|
||||
blockToPropose.Block.Slot = 5
|
||||
blockToPropose.Block.ParentRoot = parent[:]
|
||||
blk := ðpb.GenericSignedBeaconBlock_Bellatrix{Bellatrix: blockToPropose}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb block no blob",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBeaconBlockDeneb()
|
||||
blockToPropose.Block.Slot = 5
|
||||
blockToPropose.Block.ParentRoot = parent[:]
|
||||
blk := ðpb.GenericSignedBeaconBlock_Deneb{Deneb: ðpb.SignedBeaconBlockAndBlobsDeneb{
|
||||
Block: blockToPropose,
|
||||
}}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb block has blobs",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBeaconBlockDeneb()
|
||||
blockToPropose.Block.Slot = 5
|
||||
blockToPropose.Block.ParentRoot = parent[:]
|
||||
blk := ðpb.GenericSignedBeaconBlock_Deneb{Deneb: ðpb.SignedBeaconBlockAndBlobsDeneb{
|
||||
Block: blockToPropose,
|
||||
Blobs: []*ethpb.SignedBlobSidecar{
|
||||
{Message: ðpb.BlobSidecar{Index: 0, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 1, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 2, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 3, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
},
|
||||
}}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb block has too many blobs",
|
||||
err: "Too many blobs in block: 7",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBeaconBlockDeneb()
|
||||
blockToPropose.Block.Slot = 5
|
||||
blockToPropose.Block.ParentRoot = parent[:]
|
||||
blk := ðpb.GenericSignedBeaconBlock_Deneb{Deneb: ðpb.SignedBeaconBlockAndBlobsDeneb{
|
||||
Block: blockToPropose,
|
||||
Blobs: []*ethpb.SignedBlobSidecar{
|
||||
{Message: ðpb.BlobSidecar{Index: 0, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 1, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 2, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 3, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 4, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 5, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
{Message: ðpb.BlobSidecar{Index: 6, Slot: 5, BlockParentRoot: parent[:]}},
|
||||
},
|
||||
}}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
@@ -570,17 +753,31 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
require.NoError(t, err)

c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
db := dbutil.SetupDB(t)
proposerServer := &Server{
BlockReceiver: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, PayloadCapella: emptyPayloadCapella()},
BeaconDB: db,
}
blockToPropose := tt.block(bsRoot)
res, err := proposerServer.ProposeBeaconBlock(context.Background(), blockToPropose)
assert.NoError(t, err, "Could not propose block correctly")
if res == nil || len(res.BlockRoot) == 0 {
t.Error("No block root was returned")
if tt.err != "" { // Expecting an error
require.ErrorContains(t, tt.err, err)
} else {
assert.NoError(t, err, "Could not propose block correctly")
if res == nil || len(res.BlockRoot) == 0 {
t.Error("No block root was returned")
}
}
if tt.name == "deneb block has blobs" {
scs, err := db.BlobSidecarsBySlot(ctx, blockToPropose.GetDeneb().Block.Block.Slot)
require.NoError(t, err)
assert.Equal(t, 4, len(scs))
for i, sc := range scs {
require.Equal(t, uint64(i), sc.Index)
}
}
})
}

@@ -48,6 +48,7 @@ type BeaconState struct {
|
||||
nextSyncCommittee *ethpb.SyncCommittee
|
||||
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
||||
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
||||
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
|
||||
nextWithdrawalIndex uint64
|
||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
|
||||
|
||||
@@ -48,6 +48,7 @@ type BeaconState struct {
|
||||
nextSyncCommittee *ethpb.SyncCommittee
|
||||
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
||||
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
||||
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
|
||||
nextWithdrawalIndex uint64
|
||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
|
||||
|
||||
@@ -56,6 +56,18 @@ func TestBeaconState_LatestBlockHeader_Capella(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func TestBeaconState_LatestBlockHeader_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateLatestBlockHeader(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{})
|
||||
},
|
||||
func(BH *ethpb.BeaconBlockHeader) (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{LatestBlockHeader: BH})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestBeaconState_BlockRoots_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateBlockRootsNative(
|
||||
t,
|
||||
@@ -104,6 +116,18 @@ func TestBeaconState_BlockRoots_Capella(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func TestBeaconState_BlockRoots_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateBlockRootsNative(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{})
|
||||
},
|
||||
func(BR [][]byte) (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{BlockRoots: BR})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestBeaconState_BlockRootAtIndex_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateBlockRootAtIndexNative(
|
||||
t,
|
||||
@@ -151,3 +175,15 @@ func TestBeaconState_BlockRootAtIndex_Capella(t *testing.T) {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestBeaconState_BlockRootAtIndex_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateBlockRootAtIndexNative(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{})
|
||||
},
|
||||
func(BR [][]byte) (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{BlockRoots: BR})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -41,6 +41,14 @@ func TestBeaconState_PreviousJustifiedCheckpointNil_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_PreviousJustifiedCheckpointNil_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStatePreviousJustifiedCheckpointNil(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_PreviousJustifiedCheckpoint_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStatePreviousJustifiedCheckpoint(
|
||||
t,
|
||||
@@ -73,6 +81,14 @@ func TestBeaconState_PreviousJustifiedCheckpoint_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_PreviousJustifiedCheckpoint_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStatePreviousJustifiedCheckpoint(
|
||||
t,
|
||||
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{PreviousJustifiedCheckpoint: cp})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_CurrentJustifiedCheckpointNil_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateCurrentJustifiedCheckpointNil(
|
||||
t,
|
||||
@@ -105,6 +121,14 @@ func TestBeaconState_CurrentJustifiedCheckpointNil_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_CurrentJustifiedCheckpointNil_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateCurrentJustifiedCheckpointNil(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_CurrentJustifiedCheckpoint_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateCurrentJustifiedCheckpoint(
|
||||
t,
|
||||
@@ -137,6 +161,14 @@ func TestBeaconState_CurrentJustifiedCheckpoint_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_CurrentJustifiedCheckpoint_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateCurrentJustifiedCheckpoint(
|
||||
t,
|
||||
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{CurrentJustifiedCheckpoint: cp})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_FinalizedCheckpointNil_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateFinalizedCheckpointNil(
|
||||
t,
|
||||
@@ -169,6 +201,14 @@ func TestBeaconState_FinalizedCheckpointNil_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_FinalizedCheckpointNil_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateFinalizedCheckpointNil(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_FinalizedCheckpoint_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateFinalizedCheckpoint(
|
||||
t,
|
||||
@@ -201,6 +241,14 @@ func TestBeaconState_FinalizedCheckpoint_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_FinalizedCheckpoint_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateFinalizedCheckpoint(
|
||||
t,
|
||||
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{FinalizedCheckpoint: cp})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_JustificationBitsNil_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateJustificationBitsNil(
|
||||
t,
|
||||
@@ -233,6 +281,14 @@ func TestBeaconState_JustificationBitsNil_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_JustificationBitsNil_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateJustificationBitsNil(
|
||||
t,
|
||||
func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_JustificationBits_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateJustificationBits(
|
||||
t,
|
||||
@@ -264,3 +320,11 @@ func TestBeaconState_JustificationBits_Capella(t *testing.T) {
|
||||
return InitializeFromProtoUnsafeCapella(&ethpb.BeaconStateCapella{JustificationBits: bits})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_JustificationBits_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateJustificationBits(
|
||||
t,
|
||||
func(bits bitfield.Bitvector4) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{JustificationBits: bits})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -20,7 +20,12 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (interfaces.ExecutionData,
if b.version == version.Bellatrix {
return blocks.WrappedExecutionPayloadHeader(b.latestExecutionPayloadHeaderVal())
}
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), 0)

if b.version == version.Capella {
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), 0)
}

return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), 0)
}

// latestExecutionPayloadHeaderVal of the beacon state.
@@ -34,3 +39,7 @@ func (b *BeaconState) latestExecutionPayloadHeaderVal() *enginev1.ExecutionPaylo
func (b *BeaconState) latestExecutionPayloadHeaderCapellaVal() *enginev1.ExecutionPayloadHeaderCapella {
return ethpb.CopyExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapella)
}

func (b *BeaconState) latestExecutionPayloadHeaderDenebVal() *enginev1.ExecutionPayloadHeaderDeneb {
return ethpb.CopyExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDeneb)
}

@@ -128,6 +128,37 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
}
|
||||
case version.Deneb:
|
||||
return &ethpb.BeaconStateDeneb{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.fork,
|
||||
LatestBlockHeader: b.latestBlockHeader,
|
||||
BlockRoots: b.blockRoots.Slice(),
|
||||
StateRoots: b.stateRoots.Slice(),
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1Data,
|
||||
Eth1DataVotes: b.eth1DataVotes,
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validators,
|
||||
Balances: b.balances,
|
||||
RandaoMixes: b.randaoMixes.Slice(),
|
||||
Slashings: b.slashings,
|
||||
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||
JustificationBits: b.justificationBits,
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||
InactivityScores: b.inactivityScores,
|
||||
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||
NextSyncCommittee: b.nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDeneb,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -255,6 +286,37 @@ func (b *BeaconState) ToProto() interface{} {
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
}
|
||||
case version.Deneb:
|
||||
return &ethpb.BeaconStateDeneb{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.forkVal(),
|
||||
LatestBlockHeader: b.latestBlockHeaderVal(),
|
||||
BlockRoots: b.blockRoots.Slice(),
|
||||
StateRoots: b.stateRoots.Slice(),
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1DataVal(),
|
||||
Eth1DataVotes: b.eth1DataVotesVal(),
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validatorsVal(),
|
||||
Balances: b.balancesVal(),
|
||||
RandaoMixes: b.randaoMixes.Slice(),
|
||||
Slashings: b.slashingsVal(),
|
||||
PreviousEpochParticipation: b.previousEpochParticipationVal(),
|
||||
CurrentEpochParticipation: b.currentEpochParticipationVal(),
|
||||
JustificationBits: b.justificationBitsVal(),
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpointVal(),
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpointVal(),
|
||||
FinalizedCheckpoint: b.finalizedCheckpointVal(),
|
||||
InactivityScores: b.inactivityScoresVal(),
|
||||
CurrentSyncCommittee: b.currentSyncCommitteeVal(),
|
||||
NextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDenebVal(),
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -338,3 +400,13 @@ func ProtobufBeaconStateCapella(s interface{}) (*ethpb.BeaconStateCapella, error
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
// ProtobufBeaconStateDeneb transforms an input into beacon state Deneb in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
func ProtobufBeaconStateDeneb(s interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, ok := s.(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("input is not type pb.ProtobufBeaconStateDeneb")
}
return pbState, nil
}

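For orientation (not part of the diff): a minimal, hedged sketch of how the Deneb state helpers added in this changeset compose when round-tripping a state through its protobuf form. Package qualifiers are omitted on the assumption that the snippet lives alongside these functions; st is assumed to already be a Deneb-version state.BeaconState.

// Hedged sketch: round-trip a Deneb state via its protobuf representation.
pb := st.ToProtoUnsafe()                     // interface{} holding *ethpb.BeaconStateDeneb
pbState, err := ProtobufBeaconStateDeneb(pb) // type-assert to the concrete proto
if err != nil {
	return err
}
clone, err := InitializeFromProtoDeneb(pbState) // proto.Clone followed by InitializeFromProtoUnsafeDeneb
if err != nil {
	return err
}
_ = clone
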
@@ -32,6 +32,12 @@ func TestBeaconState_SlotDataRace_Capella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_SlotDataRace_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateSlotDataRace(t, func() (state.BeaconState, error) {
|
||||
return InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{Slot: 1})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_MatchCurrentJustifiedCheckpt_Phase0(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateMatchCurrentJustifiedCheckptNative(
|
||||
t,
|
||||
|
||||
@@ -44,6 +44,14 @@ func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice_Capella(t *testing
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice_Deneb(t *testing.T) {
|
||||
testtmpl.VerifyBeaconStateValidatorAtIndexReadOnlyHandlesNilSlice(t, func() (state.BeaconState, error) {
|
||||
return statenative.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
|
||||
Validators: nil,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidatorIndexOutOfRangeError(t *testing.T) {
|
||||
err := statenative.NewValidatorIndexOutOfRangeError(1)
|
||||
require.Equal(t, err.Error(), "index 1 out of range")
|
||||
|
||||
@@ -13,6 +13,12 @@ import (
|
||||
)
|
||||
|
||||
func TestNextWithdrawalIndex(t *testing.T) {
|
||||
t.Run("ok for deneb", func(t *testing.T) {
|
||||
s := BeaconState{version: version.Deneb, nextWithdrawalIndex: 123}
|
||||
i, err := s.NextWithdrawalIndex()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(123), i)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
s := BeaconState{version: version.Capella, nextWithdrawalIndex: 123}
|
||||
i, err := s.NextWithdrawalIndex()
|
||||
@@ -27,6 +33,12 @@ func TestNextWithdrawalIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNextWithdrawalValidatorIndex(t *testing.T) {
|
||||
t.Run("ok for deneb", func(t *testing.T) {
|
||||
s := BeaconState{version: version.Deneb, nextWithdrawalValidatorIndex: 123}
|
||||
i, err := s.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.ValidatorIndex(123), i)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
s := BeaconState{version: version.Capella, nextWithdrawalValidatorIndex: 123}
|
||||
i, err := s.NextWithdrawalValidatorIndex()
|
||||
@@ -329,3 +341,257 @@ func TestExpectedWithdrawals(t *testing.T) {
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
})
|
||||
}
|
||||
|
||||
func TestExpectedWithdrawals_Deneb(t *testing.T) {
|
||||
t.Run("no withdrawals", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(expected))
|
||||
})
|
||||
t.Run("one fully withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
nextWithdrawalValidatorIndex: 20,
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
s.validators[3].WithdrawableEpoch = primitives.Epoch(0)
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(expected))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 3,
|
||||
Address: s.validators[3].WithdrawalCredentials[12:],
|
||||
Amount: s.balances[3],
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
})
|
||||
t.Run("one partially withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
s.balances[3] += params.BeaconConfig().MinDepositAmount
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(expected))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 3,
|
||||
Address: s.validators[3].WithdrawalCredentials[12:],
|
||||
Amount: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
})
|
||||
t.Run("one partially and one fully withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
val.WithdrawalCredentials[31] = byte(i)
|
||||
s.validators[i] = val
|
||||
}
|
||||
s.balances[3] += params.BeaconConfig().MinDepositAmount
|
||||
s.validators[7].WithdrawableEpoch = primitives.Epoch(0)
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(expected))
|
||||
|
||||
withdrawalFull := &enginev1.Withdrawal{
|
||||
Index: 1,
|
||||
ValidatorIndex: 7,
|
||||
Address: s.validators[7].WithdrawalCredentials[12:],
|
||||
Amount: s.balances[7],
|
||||
}
|
||||
withdrawalPartial := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 3,
|
||||
Address: s.validators[3].WithdrawalCredentials[12:],
|
||||
Amount: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
require.DeepEqual(t, withdrawalPartial, expected[0])
|
||||
require.DeepEqual(t, withdrawalFull, expected[1])
|
||||
})
|
||||
t.Run("all partially withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance + 1
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxWithdrawalsPerPayload, uint64(len(expected)))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 0,
|
||||
Address: s.validators[0].WithdrawalCredentials[12:],
|
||||
Amount: 1,
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
})
|
||||
t.Run("all fully withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(0),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxWithdrawalsPerPayload, uint64(len(expected)))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 0,
|
||||
Address: s.validators[0].WithdrawalCredentials[12:],
|
||||
Amount: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
})
|
||||
t.Run("all fully and partially withdrawable", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance + 1
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(0),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxWithdrawalsPerPayload, uint64(len(expected)))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 0,
|
||||
Address: s.validators[0].WithdrawalCredentials[12:],
|
||||
Amount: params.BeaconConfig().MaxEffectiveBalance + 1,
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
})
|
||||
t.Run("one fully withdrawable but zero balance", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
nextWithdrawalValidatorIndex: 20,
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
s.validators[3].WithdrawableEpoch = primitives.Epoch(0)
|
||||
s.balances[3] = 0
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(expected))
|
||||
})
|
||||
t.Run("one partially withdrawable, one above sweep bound", func(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
validators: make([]*ethpb.Validator, 100),
|
||||
balances: make([]uint64, 100),
|
||||
}
|
||||
for i := range s.validators {
|
||||
s.balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
val := &ethpb.Validator{
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
s.validators[i] = val
|
||||
}
|
||||
s.balances[3] += params.BeaconConfig().MinDepositAmount
|
||||
s.balances[10] += params.BeaconConfig().MinDepositAmount
|
||||
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = 10
|
||||
expected, err := s.ExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(expected))
|
||||
withdrawal := &enginev1.Withdrawal{
|
||||
Index: 0,
|
||||
ValidatorIndex: 3,
|
||||
Address: s.validators[3].WithdrawalCredentials[12:],
|
||||
Amount: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
require.DeepEqual(t, withdrawal, expected[0])
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
})
|
||||
}
|
||||
|
||||
@@ -33,6 +33,8 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
||||
case version.Capella:
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||
case version.Deneb:
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount) // Deneb has the same state field count as Capella.
|
||||
}
|
||||
|
||||
// Genesis time root.
|
||||
@@ -243,7 +245,18 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
return nil, err
|
||||
}
|
||||
fieldRoots[types.LatestExecutionPayloadHeaderCapella.RealPosition()] = executionPayloadRoot[:]
|
||||
}
|
||||
|
||||
if state.version == version.Deneb {
|
||||
// Execution payload root.
|
||||
executionPayloadRoot, err := state.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldRoots[types.LatestExecutionPayloadHeaderDeneb.RealPosition()] = executionPayloadRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Capella {
|
||||
// Next withdrawal index root.
|
||||
nextWithdrawalIndexRoot := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(nextWithdrawalIndexRoot, state.nextWithdrawalIndex)
|
||||
|
||||
@@ -131,6 +131,35 @@ func TestStateReferenceSharing_Finalizer_Capella(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateReferenceSharing_Finalizer_Deneb(t *testing.T) {
|
||||
// This test showcases the logic on the RandaoMixes field with the GC finalizer.
|
||||
|
||||
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{RandaoMixes: [][]byte{[]byte("foo")}})
|
||||
require.NoError(t, err)
|
||||
a, ok := s.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected a single reference for RANDAO mixes")
|
||||
|
||||
func() {
|
||||
// Create object in a different scope for GC
|
||||
b := a.Copy()
|
||||
assert.Equal(t, uint(2), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 references to RANDAO mixes")
|
||||
_ = b
|
||||
}()
|
||||
|
||||
runtime.GC() // Should run finalizer on object b
|
||||
assert.Equal(t, uint(1), a.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 1 shared reference to RANDAO mixes!")
|
||||
|
||||
copied := a.Copy()
|
||||
b, ok := copied.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, uint(2), b.sharedFieldReferences[types.RandaoMixes].Refs(), "Expected 2 shared references to RANDAO mixes")
|
||||
require.NoError(t, b.UpdateRandaoMixesAtIndex(0, []byte("bar")))
|
||||
if b.sharedFieldReferences[types.RandaoMixes].Refs() != 1 || a.sharedFieldReferences[types.RandaoMixes].Refs() != 1 {
|
||||
t.Error("Expected 1 shared reference to RANDAO mix for both a and b")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Phase0(t *testing.T) {
|
||||
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
|
||||
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{
|
||||
@@ -367,6 +396,65 @@ func TestStateReferenceCopy_NoUnexpectedRootsMutation_Capella(t *testing.T) {
|
||||
assertRefCount(t, b, types.StateRoots, 1)
|
||||
}
|
||||
|
||||
func TestStateReferenceCopy_NoUnexpectedRootsMutation_Deneb(t *testing.T) {
|
||||
root1, root2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
|
||||
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
|
||||
BlockRoots: [][]byte{
|
||||
root1[:],
|
||||
},
|
||||
StateRoots: [][]byte{
|
||||
root1[:],
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
a, ok := s.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.NoError(t, err)
|
||||
assertRefCount(t, a, types.BlockRoots, 1)
|
||||
assertRefCount(t, a, types.StateRoots, 1)
|
||||
|
||||
// Copy, increases reference count.
|
||||
copied := a.Copy()
|
||||
b, ok := copied.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
assertRefCount(t, a, types.BlockRoots, 2)
|
||||
assertRefCount(t, a, types.StateRoots, 2)
|
||||
assertRefCount(t, b, types.BlockRoots, 2)
|
||||
assertRefCount(t, b, types.StateRoots, 2)
|
||||
|
||||
// Assert shared state.
|
||||
blockRootsA := a.BlockRoots()
|
||||
stateRootsA := a.StateRoots()
|
||||
blockRootsB := b.BlockRoots()
|
||||
stateRootsB := b.StateRoots()
|
||||
assertValFound(t, blockRootsA, root1[:])
|
||||
assertValFound(t, blockRootsB, root1[:])
|
||||
assertValFound(t, stateRootsA, root1[:])
|
||||
assertValFound(t, stateRootsB, root1[:])
|
||||
|
||||
// Mutator should only affect calling state: a.
|
||||
require.NoError(t, a.UpdateBlockRootAtIndex(0, root2))
|
||||
require.NoError(t, a.UpdateStateRootAtIndex(0, root2))
|
||||
|
||||
// Assert no shared state mutation occurred only on state a (copy on write).
|
||||
assertValNotFound(t, a.BlockRoots(), root1[:])
|
||||
assertValNotFound(t, a.StateRoots(), root1[:])
|
||||
assertValFound(t, a.BlockRoots(), root2[:])
|
||||
assertValFound(t, a.StateRoots(), root2[:])
|
||||
assertValFound(t, b.BlockRoots(), root1[:])
|
||||
assertValFound(t, b.StateRoots(), root1[:])
|
||||
assert.DeepEqual(t, root2[:], a.BlockRoots()[0], "Expected mutation not found")
|
||||
assert.DeepEqual(t, root2[:], a.StateRoots()[0], "Expected mutation not found")
|
||||
assert.DeepEqual(t, root1[:], blockRootsB[0], "Unexpected mutation found")
|
||||
assert.DeepEqual(t, root1[:], stateRootsB[0], "Unexpected mutation found")
|
||||
|
||||
// Copy on write happened, reference counters are reset.
|
||||
assertRefCount(t, a, types.BlockRoots, 1)
|
||||
assertRefCount(t, a, types.StateRoots, 1)
|
||||
assertRefCount(t, b, types.BlockRoots, 1)
|
||||
assertRefCount(t, b, types.StateRoots, 1)
|
||||
}
|
||||
|
||||
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Phase0(t *testing.T) {
|
||||
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
|
||||
s, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{
|
||||
@@ -543,6 +631,50 @@ func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Capella(t *testing.T) {
|
||||
assertRefCount(t, b, types.RandaoMixes, 1)
|
||||
}
|
||||
|
||||
func TestStateReferenceCopy_NoUnexpectedRandaoMutation_Deneb(t *testing.T) {
|
||||
val1, val2 := bytesutil.ToBytes32([]byte("foo")), bytesutil.ToBytes32([]byte("bar"))
|
||||
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
|
||||
RandaoMixes: [][]byte{
|
||||
val1[:],
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
a, ok := s.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.NoError(t, err)
|
||||
assertRefCount(t, a, types.RandaoMixes, 1)
|
||||
|
||||
// Copy, increases reference count.
|
||||
copied := a.Copy()
|
||||
b, ok := copied.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
assertRefCount(t, a, types.RandaoMixes, 2)
|
||||
assertRefCount(t, b, types.RandaoMixes, 2)
|
||||
|
||||
// Assert shared state.
|
||||
mixesA := a.RandaoMixes()
|
||||
mixesB := b.RandaoMixes()
|
||||
assertValFound(t, mixesA, val1[:])
|
||||
assertValFound(t, mixesB, val1[:])
|
||||
|
||||
// Mutator should only affect calling state: a.
|
||||
require.NoError(t, a.UpdateRandaoMixesAtIndex(0, val2[:]))
|
||||
|
||||
// Assert no shared state mutation occurred only on state a (copy on write).
|
||||
assertValFound(t, a.RandaoMixes(), val2[:])
|
||||
assertValNotFound(t, a.RandaoMixes(), val1[:])
|
||||
assertValFound(t, b.RandaoMixes(), val1[:])
|
||||
assertValNotFound(t, b.RandaoMixes(), val2[:])
|
||||
assertValFound(t, mixesB, val1[:])
|
||||
assertValNotFound(t, mixesB, val2[:])
|
||||
assert.DeepEqual(t, val2[:], a.RandaoMixes()[0], "Expected mutation not found")
|
||||
assert.DeepEqual(t, val1[:], mixesB[0], "Unexpected mutation found")
|
||||
|
||||
// Copy on write happened, reference counters are reset.
|
||||
assertRefCount(t, a, types.RandaoMixes, 1)
|
||||
assertRefCount(t, b, types.RandaoMixes, 1)
|
||||
}
|
||||
|
||||
func TestStateReferenceCopy_NoUnexpectedAttestationsMutation(t *testing.T) {
|
||||
assertAttFound := func(vals []*ethpb.PendingAttestation, val uint64) {
|
||||
for i := range vals {
|
||||
@@ -813,6 +945,41 @@ func TestValidatorReferences_RemainsConsistent_Capella(t *testing.T) {
|
||||
}))
|
||||
}
|
||||
|
||||
func TestValidatorReferences_RemainsConsistent_Deneb(t *testing.T) {
|
||||
s, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
|
||||
Validators: []*ethpb.Validator{
|
||||
{PublicKey: []byte{'A'}},
|
||||
{PublicKey: []byte{'B'}},
|
||||
{PublicKey: []byte{'C'}},
|
||||
{PublicKey: []byte{'D'}},
|
||||
{PublicKey: []byte{'E'}},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
a, ok := s.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// Create a second state.
|
||||
copied := a.Copy()
|
||||
b, ok := copied.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// Update First Validator.
|
||||
assert.NoError(t, a.UpdateValidatorAtIndex(0, &ethpb.Validator{PublicKey: []byte{'Z'}}))
|
||||
|
||||
assert.DeepNotEqual(t, a.Validators()[0], b.Validators()[0], "validators are equal when they are supposed to be different")
|
||||
// Modify all validators from copied state.
|
||||
assert.NoError(t, b.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
|
||||
return true, &ethpb.Validator{PublicKey: []byte{'V'}}, nil
|
||||
}))
|
||||
|
||||
// Ensure reference is properly accounted for.
|
||||
assert.NoError(t, a.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
assert.NotEqual(t, bytesutil.ToBytes48([]byte{'V'}), val.PublicKey())
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
func TestValidatorReferences_RemainsConsistent_Bellatrix(t *testing.T) {
|
||||
s, err := InitializeFromProtoUnsafeBellatrix(&ethpb.BeaconStateBellatrix{
|
||||
Validators: []*ethpb.Validator{
|
||||
|
||||
@@ -36,6 +36,14 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
b.latestExecutionPayloadHeaderCapella = latest
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
return nil
case *enginev1.ExecutionPayloadDeneb:
latest, err := consensusblocks.PayloadToHeaderDeneb(val)
if err != nil {
return errors.Wrap(err, "could not convert payload to header")
}
b.latestExecutionPayloadHeaderDeneb = latest
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderDeneb)
return nil
case *enginev1.ExecutionPayloadHeader:
b.latestExecutionPayloadHeader = header
b.markFieldAsDirty(types.LatestExecutionPayloadHeader)
@@ -44,6 +52,10 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
b.latestExecutionPayloadHeaderCapella = header
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
return nil
case *enginev1.ExecutionPayloadHeaderDeneb:
b.latestExecutionPayloadHeaderDeneb = header
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderDeneb)
return nil
default:
return errors.New("value must be an execution payload header")
}

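A hedged illustration (an assumption, not taken from the diff) of driving the setter above with a Deneb header: the header is wrapped with the same constructor that LatestExecutionPayloadHeader() uses elsewhere in this changeset, and the setter's new case for *enginev1.ExecutionPayloadHeaderDeneb is expected to store it.

// Hedged sketch: st is assumed to be a Deneb beacon state.
header := &enginev1.ExecutionPayloadHeaderDeneb{}
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(header, 0)
if err != nil {
	return err
}
if err := st.SetLatestExecutionPayloadHeader(wrapped); err != nil {
	return err
}
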
@@ -30,3 +30,25 @@ func TestSetNextWithdrawalValidatorIndex(t *testing.T) {
|
||||
require.Equal(t, primitives.ValidatorIndex(5), s.nextWithdrawalValidatorIndex)
|
||||
require.Equal(t, true, s.dirtyFields[types.NextWithdrawalValidatorIndex])
|
||||
}
|
||||
|
||||
func TestSetNextWithdrawalIndex_Deneb(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
nextWithdrawalIndex: 3,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
require.NoError(t, s.SetNextWithdrawalIndex(5))
|
||||
require.Equal(t, uint64(5), s.nextWithdrawalIndex)
|
||||
require.Equal(t, true, s.dirtyFields[types.NextWithdrawalIndex])
|
||||
}
|
||||
|
||||
func TestSetNextWithdrawalValidatorIndex_Deneb(t *testing.T) {
|
||||
s := BeaconState{
|
||||
version: version.Deneb,
|
||||
nextWithdrawalValidatorIndex: 3,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
require.NoError(t, s.SetNextWithdrawalValidatorIndex(5))
|
||||
require.Equal(t, primitives.ValidatorIndex(5), s.nextWithdrawalValidatorIndex)
|
||||
require.Equal(t, true, s.dirtyFields[types.NextWithdrawalValidatorIndex])
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (

func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
switch b.version {
case version.Bellatrix, version.Capella:
case version.Bellatrix, version.Capella, version.Deneb:
return params.BeaconConfig().ProportionalSlashingMultiplierBellatrix, nil
case version.Altair:
return params.BeaconConfig().ProportionalSlashingMultiplierAltair, nil
@@ -19,7 +19,7 @@ func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {

func (b *BeaconState) InactivityPenaltyQuotient() (uint64, error) {
switch b.version {
case version.Bellatrix, version.Capella:
case version.Bellatrix, version.Capella, version.Deneb:
return params.BeaconConfig().InactivityPenaltyQuotientBellatrix, nil
case version.Altair:
return params.BeaconConfig().InactivityPenaltyQuotientAltair, nil

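A small hedged example of the effect of the two case-list changes above: a state tagged with version.Deneb now resolves to the Bellatrix-era penalty parameters. The struct-literal construction mirrors the package-internal tests elsewhere in this diff.

// Hedged sketch, package-internal: Deneb falls through to the Bellatrix constants.
s := BeaconState{version: version.Deneb}
m, _ := s.ProportionalSlashingMultiplier() // params.BeaconConfig().ProportionalSlashingMultiplierBellatrix
q, _ := s.InactivityPenaltyQuotient()      // params.BeaconConfig().InactivityPenaltyQuotientBellatrix
_, _ = m, q
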
@@ -267,7 +267,7 @@ func FuzzCapellaStateHashTreeRoot(f *testing.F) {
|
||||
assert.NoError(t, err)
|
||||
// Perform a cold HTR calculation by initializing a new state.
|
||||
innerState, ok := stateObj.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
|
||||
assert.Equal(t, true, ok, "inner state is a not a beacon state bellatrix proto")
|
||||
assert.Equal(t, true, ok, "inner state is a not a beacon state capella proto")
|
||||
newState, err := native.InitializeFromProtoUnsafeCapella(innerState)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -292,3 +292,73 @@ func FuzzCapellaStateHashTreeRoot(f *testing.F) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzDenebStateHashTreeRoot(f *testing.F) {
|
||||
gState, _ := util.DeterministicGenesisStateDeneb(f, 100)
|
||||
output, err := gState.MarshalSSZ()
|
||||
assert.NoError(f, err)
|
||||
randPool := make([]byte, 100)
|
||||
_, err = rand.NewDeterministicGenerator().Read(randPool)
|
||||
assert.NoError(f, err)
|
||||
f.Add(randPool, uint64(10))
|
||||
f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
|
||||
stateSSZ := bytesutil.SafeCopyBytes(output)
|
||||
for i := 0; i < len(diffBuffer); i += 9 {
|
||||
if i+8 >= len(diffBuffer) {
|
||||
return
|
||||
}
|
||||
num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
|
||||
num %= uint64(len(diffBuffer))
|
||||
// Perform a XOR on the byte of the selected index.
|
||||
stateSSZ[num] ^= diffBuffer[i+8]
|
||||
}
|
||||
pbState := &ethpb.BeaconStateDeneb{}
|
||||
err := pbState.UnmarshalSSZ(stateSSZ)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
nativeState, err := native.InitializeFromProtoDeneb(pbState)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
slotsToTransition %= 100
|
||||
stateObj, err := native.InitializeFromProtoUnsafeDeneb(pbState)
|
||||
assert.NoError(t, err)
|
||||
for stateObj.Slot() < primitives.Slot(slotsToTransition) {
|
||||
stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
|
||||
assert.NoError(t, err)
|
||||
stateObj.Copy()
|
||||
|
||||
nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
|
||||
assert.NoError(t, err)
|
||||
nativeState.Copy()
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
// Perform a cold HTR calculation by initializing a new state.
|
||||
innerState, ok := stateObj.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
|
||||
assert.Equal(t, true, ok, "inner state is a not a beacon state deneb proto")
|
||||
newState, err := native.InitializeFromProtoUnsafeDeneb(innerState)
|
||||
assert.NoError(t, err)
|
||||
|
||||
newRt, newErr := newState.HashTreeRoot(context.Background())
|
||||
rt, err := stateObj.HashTreeRoot(context.Background())
|
||||
nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
|
||||
assert.Equal(t, newErr != nil, err != nil)
|
||||
assert.Equal(t, newErr != nil, nativeErr != nil)
|
||||
if err == nil {
|
||||
assert.Equal(t, rt, newRt)
|
||||
assert.Equal(t, rt, nativeRt)
|
||||
}
|
||||
|
||||
newSSZ, newErr := newState.MarshalSSZ()
|
||||
stateObjSSZ, err := stateObj.MarshalSSZ()
|
||||
nativeSSZ, nativeErr := nativeState.MarshalSSZ()
|
||||
assert.Equal(t, newErr != nil, err != nil)
|
||||
assert.Equal(t, newErr != nil, nativeErr != nil)
|
||||
if err == nil {
|
||||
assert.DeepEqual(t, newSSZ, stateObjSSZ)
|
||||
assert.DeepEqual(t, newSSZ, nativeSSZ)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -272,6 +272,62 @@ func TestBeaconState_NoDeadlock_Capella(t *testing.T) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestBeaconState_NoDeadlock_Deneb(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
for i := uint64(1); i < count; i++ {
|
||||
var someRoot [32]byte
|
||||
var someKey [fieldparams.BLSPubkeyLength]byte
|
||||
copy(someRoot[:], strconv.Itoa(int(i)))
|
||||
copy(someKey[:], strconv.Itoa(int(i)))
|
||||
vals = append(vals, &ethpb.Validator{
|
||||
PublicKey: someKey[:],
|
||||
WithdrawalCredentials: someRoot[:],
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1,
|
||||
ActivationEpoch: 1,
|
||||
ExitEpoch: 1,
|
||||
WithdrawableEpoch: 1,
|
||||
})
|
||||
}
|
||||
st, err := InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
|
||||
Validators: vals,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
s, ok := st.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
// Continuously lock and unlock the state
|
||||
// by acquiring the lock.
|
||||
for i := 0; i < 1000; i++ {
|
||||
for _, f := range s.stateFieldLeaves {
|
||||
f.Lock()
|
||||
if f.Empty() {
|
||||
f.InsertFieldLayer(make([][]*[32]byte, 10))
|
||||
}
|
||||
f.Unlock()
|
||||
f.FieldReference().AddRef()
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
// Constantly read from the offending portion
|
||||
// of the code to ensure there is no possible
|
||||
// recursive read locking.
|
||||
for i := 0; i < 1000; i++ {
|
||||
go func() {
|
||||
_ = st.FieldReferencesCount()
|
||||
}()
|
||||
}
|
||||
// Test will not terminate in the event of a deadlock.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
|
||||
|
||||
newState := generateState(t)
|
||||
|
||||
@@ -83,11 +83,20 @@ var capellaFields = append(
|
||||
types.HistoricalSummaries,
|
||||
)
|
||||
|
||||
var denebFields = append(
|
||||
altairFields,
|
||||
types.LatestExecutionPayloadHeaderDeneb,
|
||||
types.NextWithdrawalIndex,
|
||||
types.NextWithdrawalValidatorIndex,
|
||||
types.HistoricalSummaries,
|
||||
)
|
||||
|
||||
const (
|
||||
phase0SharedFieldRefCount = 10
|
||||
altairSharedFieldRefCount = 11
|
||||
bellatrixSharedFieldRefCount = 12
|
||||
capellaSharedFieldRefCount = 14
|
||||
denebSharedFieldRefCount = 14
|
||||
)
|
||||
|
||||
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
|
||||
@@ -110,6 +119,11 @@ func InitializeFromProtoCapella(st *ethpb.BeaconStateCapella) (state.BeaconState
|
||||
return InitializeFromProtoUnsafeCapella(proto.Clone(st).(*ethpb.BeaconStateCapella))
|
||||
}
|
||||
|
||||
// InitializeFromProtoDeneb the beacon state from a protobuf representation.
|
||||
func InitializeFromProtoDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeDeneb(proto.Clone(st).(*ethpb.BeaconStateDeneb))
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
|
||||
// and sets them as fields of the BeaconState type.
|
||||
func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState, error) {
|
||||
@@ -474,6 +488,102 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafeDeneb directly uses the beacon state protobuf fields
|
||||
// and sets them as fields of the BeaconState type.
|
||||
func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
|
||||
if st == nil {
|
||||
return nil, errors.New("received nil state")
|
||||
}
|
||||
|
||||
var bRoots customtypes.BlockRoots
|
||||
for i, r := range st.BlockRoots {
|
||||
bRoots[i] = bytesutil.ToBytes32(r)
|
||||
}
|
||||
var sRoots customtypes.StateRoots
|
||||
for i, r := range st.StateRoots {
|
||||
sRoots[i] = bytesutil.ToBytes32(r)
|
||||
}
|
||||
hRoots := customtypes.HistoricalRoots(make([][32]byte, len(st.HistoricalRoots)))
|
||||
for i, r := range st.HistoricalRoots {
|
||||
hRoots[i] = bytesutil.ToBytes32(r)
|
||||
}
|
||||
var mixes customtypes.RandaoMixes
|
||||
for i, m := range st.RandaoMixes {
|
||||
mixes[i] = bytesutil.ToBytes32(m)
|
||||
}
|
||||
|
||||
fieldCount := params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||
b := &BeaconState{
|
||||
version: version.Deneb,
|
||||
genesisTime: st.GenesisTime,
|
||||
genesisValidatorsRoot: bytesutil.ToBytes32(st.GenesisValidatorsRoot),
|
||||
slot: st.Slot,
|
||||
fork: st.Fork,
|
||||
latestBlockHeader: st.LatestBlockHeader,
|
||||
blockRoots: &bRoots,
|
||||
stateRoots: &sRoots,
|
||||
historicalRoots: hRoots,
|
||||
eth1Data: st.Eth1Data,
|
||||
eth1DataVotes: st.Eth1DataVotes,
|
||||
eth1DepositIndex: st.Eth1DepositIndex,
|
||||
validators: st.Validators,
|
||||
balances: st.Balances,
|
||||
randaoMixes: &mixes,
|
||||
slashings: st.Slashings,
|
||||
previousEpochParticipation: st.PreviousEpochParticipation,
|
||||
currentEpochParticipation: st.CurrentEpochParticipation,
|
||||
justificationBits: st.JustificationBits,
|
||||
previousJustifiedCheckpoint: st.PreviousJustifiedCheckpoint,
|
||||
currentJustifiedCheckpoint: st.CurrentJustifiedCheckpoint,
|
||||
finalizedCheckpoint: st.FinalizedCheckpoint,
|
||||
inactivityScores: st.InactivityScores,
|
||||
currentSyncCommittee: st.CurrentSyncCommittee,
|
||||
nextSyncCommittee: st.NextSyncCommittee,
|
||||
latestExecutionPayloadHeaderDeneb: st.LatestExecutionPayloadHeader,
|
||||
nextWithdrawalIndex: st.NextWithdrawalIndex,
|
||||
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
|
||||
historicalSummaries: st.HistoricalSummaries,
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
|
||||
for _, f := range denebFields {
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.stateFieldLeaves[f] = trie
|
||||
}
|
||||
|
||||
// Initialize field reference tracking for shared data.
|
||||
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.HistoricalRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Eth1DataVotes] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1) // New in Deneb.
|
||||
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1) // New in Capella.
|
||||
|
||||
state.StateCount.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(b, finalizerCleanup)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Copy returns a deep copy of the beacon state.
|
||||
func (b *BeaconState) Copy() state.BeaconState {
|
||||
b.lock.RLock()
|
||||
@@ -489,6 +599,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
fieldCount = params.BeaconConfig().BeaconStateBellatrixFieldCount
|
||||
case version.Capella:
|
||||
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||
case version.Deneb:
|
||||
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||
}
|
||||
|
||||
dst := &BeaconState{
|
||||
@@ -532,6 +644,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
nextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||
latestExecutionPayloadHeader: b.latestExecutionPayloadHeaderVal(),
|
||||
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapellaVal(),
|
||||
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDenebVal(),
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
@@ -551,6 +664,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
|
||||
case version.Capella:
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
|
||||
case version.Deneb:
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
|
||||
}
|
||||
|
||||
for field, ref := range b.sharedFieldReferences {
|
||||
@@ -640,6 +755,8 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
||||
case version.Capella:
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||
case version.Deneb:
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -830,6 +947,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return b.latestExecutionPayloadHeader.HashTreeRoot()
|
||||
case types.LatestExecutionPayloadHeaderCapella:
|
||||
return b.latestExecutionPayloadHeaderCapella.HashTreeRoot()
|
||||
case types.LatestExecutionPayloadHeaderDeneb:
|
||||
return b.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
|
||||
case types.NextWithdrawalIndex:
|
||||
return ssz.Uint64Root(b.nextWithdrawalIndex), nil
|
||||
case types.NextWithdrawalValidatorIndex:
|
||||
|
||||
@@ -166,6 +166,42 @@ func TestInitializeFromProto_Capella(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeFromProto_Deneb(t *testing.T) {
|
||||
type test struct {
|
||||
name string
|
||||
state *ethpb.BeaconStateDeneb
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "nil state",
|
||||
state: nil,
|
||||
error: "received nil state",
|
||||
},
|
||||
{
|
||||
name: "nil validators",
|
||||
state: &ethpb.BeaconStateDeneb{
|
||||
Slot: 4,
|
||||
Validators: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty state",
|
||||
state: &ethpb.BeaconStateDeneb{},
|
||||
},
|
||||
}
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := statenative.InitializeFromProtoDeneb(tt.state)
|
||||
if tt.error != "" {
|
||||
require.ErrorContains(t, tt.error, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeFromProtoUnsafe_Phase0(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 64)
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(testState.ToProtoUnsafe())
|
||||
@@ -297,6 +333,37 @@ func TestInitializeFromProtoUnsafe_Capella(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeFromProtoUnsafe_Deneb(t *testing.T) {
|
||||
type test struct {
|
||||
name string
|
||||
state *ethpb.BeaconStateDeneb
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "nil validators",
|
||||
state: &ethpb.BeaconStateDeneb{
|
||||
Slot: 4,
|
||||
Validators: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty state",
|
||||
state: &ethpb.BeaconStateDeneb{},
|
||||
},
|
||||
}
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := statenative.InitializeFromProtoUnsafeDeneb(tt.state)
|
||||
if tt.error != "" {
|
||||
assert.ErrorContains(t, tt.error, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconState_HashTreeRoot(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
|
||||
@@ -146,7 +146,7 @@ func (f FieldIndex) RealPosition() int {
|
||||
return 22
|
||||
case NextSyncCommittee:
|
||||
return 23
|
||||
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella:
|
||||
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella, LatestExecutionPayloadHeaderDeneb:
|
||||
return 24
|
||||
case NextWithdrawalIndex:
|
||||
return 25
|
||||
@@ -205,6 +205,7 @@ const (
|
||||
NextSyncCommittee
|
||||
LatestExecutionPayloadHeader
|
||||
LatestExecutionPayloadHeaderCapella
|
||||
LatestExecutionPayloadHeaderDeneb
|
||||
NextWithdrawalIndex
|
||||
NextWithdrawalValidatorIndex
|
||||
HistoricalSummaries
|
||||
|
||||
@@ -22,6 +22,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/capella:go_default_library",
|
||||
"//beacon-chain/core/deneb:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/capella"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/execution"
|
||||
prysmtime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
@@ -205,7 +206,7 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot primi
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||
}
|
||||
case version.Altair, version.Bellatrix, version.Capella:
|
||||
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -243,6 +244,14 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot primi
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if prysmtime.CanUpgradeToDeneb(state.Slot()) {
|
||||
state, err = deneb.UpgradeToDeneb(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return state, nil
|
||||
|
||||
@@ -33,6 +33,7 @@ go_library(
|
||||
"subscriber_beacon_aggregate_proof.go",
|
||||
"subscriber_beacon_attestation.go",
|
||||
"subscriber_beacon_blocks.go",
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
@@ -42,6 +43,7 @@ go_library(
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
@@ -164,6 +166,7 @@ go_test(
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
"validate_beacon_blocks_test.go",
|
||||
"validate_blob_test.go",
|
||||
"validate_bls_to_execution_change_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
|
||||
@@ -37,6 +37,8 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
// differentiate them below.
|
||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBlobSidecar{})]
|
||||
}
|
||||
|
||||
base := p2p.GossipTopicMappings(topic, 0)
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/equality"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
@@ -427,6 +428,21 @@ func (s *Service) pendingBlocksInCache(slot primitives.Slot) []interfaces.ReadOn
|
||||
return blks
|
||||
}
|
||||
|
||||
// This returns the signed blob sidecars stored for the given slot in slotToPendingBlobs.
|
||||
func (s *Service) pendingBlobsInCache(slot primitives.Slot) []*eth.SignedBlobSidecar {
|
||||
k := slotToCacheKey(slot)
|
||||
value, ok := s.slotToPendingBlobs.Get(k)
|
||||
if !ok {
|
||||
return []*eth.SignedBlobSidecar{}
|
||||
}
|
||||
bs, ok := value.([]*eth.SignedBlobSidecar)
|
||||
if !ok {
|
||||
log.Debug("pendingBlobsInCache: value is not of type []*eth.SignedBlobSidecar")
|
||||
return []*eth.SignedBlobSidecar{}
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
// This adds input signed beacon block to slotToPendingBlocks cache.
|
||||
func (s *Service) addPendingBlockToCache(b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
@@ -445,6 +461,23 @@ func (s *Service) addPendingBlockToCache(b interfaces.ReadOnlySignedBeaconBlock)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This adds blob to slotToPendingBlobs cache.
|
||||
func (s *Service) addPendingBlobToCache(b *eth.SignedBlobSidecar) error {
|
||||
blobs := s.pendingBlobsInCache(b.Message.Slot)
|
||||
|
||||
// If we have already seen this index, ignore it.
|
||||
for _, blob := range blobs {
|
||||
if blob.Message.Index == b.Message.Index {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
blobs = append(blobs, b)
|
||||
k := slotToCacheKey(b.Message.Slot)
|
||||
s.slotToPendingBlobs.Set(k, blobs, pendingBlockExpTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This converts input string to slot.
|
||||
func cacheKeyToSlot(s string) primitives.Slot {
|
||||
b := []byte(s)
|
||||
|
||||
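The two helpers above queue blob sidecars by slot and skip duplicate indices. The following standalone sketch shows the same idea using only the standard library; the Sidecar type and the plain map are simplified stand-ins and are not the Prysm types, while the diff itself keeps the sidecars in a go-cache instance keyed by slot.

package main

import "fmt"

// Sidecar is a simplified stand-in for the blob sidecar type used above.
type Sidecar struct {
	Slot  uint64
	Index uint64
}

// pendingBlobs keys pending sidecars by slot and ignores duplicate indices,
// mirroring the addPendingBlobToCache logic in the diff.
type pendingBlobs struct {
	bySlot map[uint64][]*Sidecar
}

func newPendingBlobs() *pendingBlobs {
	return &pendingBlobs{bySlot: make(map[uint64][]*Sidecar)}
}

func (p *pendingBlobs) add(b *Sidecar) {
	for _, existing := range p.bySlot[b.Slot] {
		if existing.Index == b.Index {
			return // already queued for this (slot, index)
		}
	}
	p.bySlot[b.Slot] = append(p.bySlot[b.Slot], b)
}

func (p *pendingBlobs) get(slot uint64) []*Sidecar {
	return p.bySlot[slot]
}

func main() {
	p := newPendingBlobs()
	p.add(&Sidecar{Slot: 7, Index: 0})
	p.add(&Sidecar{Slot: 7, Index: 0}) // duplicate index, ignored
	p.add(&Sidecar{Slot: 7, Index: 1})
	fmt.Println(len(p.get(7))) // 2
}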
@@ -44,6 +44,7 @@ var _ runtime.Service = (*Service)(nil)
|
||||
|
||||
const rangeLimit uint64 = 1024
|
||||
const seenBlockSize = 1000
|
||||
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
|
||||
const seenUnaggregatedAttSize = 20000
|
||||
const seenAggregatedAttSize = 1024
|
||||
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
|
||||
@@ -110,6 +111,7 @@ type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
slotToPendingBlocks *gcache.Cache
|
||||
slotToPendingBlobs *gcache.Cache
|
||||
seenPendingBlocks map[[32]byte]bool
|
||||
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
|
||||
subHandler *subTopicHandler
|
||||
@@ -120,6 +122,8 @@ type Service struct {
|
||||
rateLimiter *limiter
|
||||
seenBlockLock sync.RWMutex
|
||||
seenBlockCache *lru.Cache
|
||||
seenBlobLock sync.RWMutex
|
||||
seenBlobCache *lru.Cache
|
||||
seenAggregatedAttestationLock sync.RWMutex
|
||||
seenAggregatedAttestationCache *lru.Cache
|
||||
seenUnAggregatedAttestationLock sync.RWMutex
|
||||
@@ -146,6 +150,7 @@ type Service struct {
|
||||
// NewService initializes new regular sync service.
|
||||
func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
c := gcache.New(pendingBlockExpTime /* exp time */, 2*pendingBlockExpTime /* prune time */)
|
||||
pendingBlobCache := gcache.New(0 /* exp time */, 0 /* prune time */)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
@@ -153,6 +158,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
chainStarted: abool.New(),
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: c,
|
||||
slotToPendingBlobs: pendingBlobCache,
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
@@ -223,6 +229,7 @@ func (s *Service) Status() error {
|
||||
// and prevent DoS.
|
||||
func (s *Service) initCaches() {
|
||||
s.seenBlockCache = lruwrpr.New(seenBlockSize)
|
||||
s.seenBlobCache = lruwrpr.New(seenBlobSize)
|
||||
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
|
||||
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
|
||||
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)
|
||||
|
||||
@@ -87,6 +87,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
s.validateCommitteeIndexBeaconAttestation, /* validator */
|
||||
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconNetworkConfig().AttestationSubnetCount,
|
||||
)
|
||||
} else {
|
||||
s.subscribeDynamicWithSubnets(
|
||||
@@ -130,6 +131,17 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
// New Gossip Topic in Deneb
|
||||
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.BlobSubnetTopicFormat,
|
||||
s.validateBlob, /* validator */
|
||||
s.blobSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconNetworkConfig().BlobsidecarSubnetCount,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe to a given topic with a given validator and subscription handler.
|
||||
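The new Deneb branch above subscribes to one gossip topic per blob subnet, using the subnet count that subscribeStaticWithSubnets now accepts. A rough, standalone sketch of how such per-subnet topic names come together; the format string and digest below are placeholders rather than the exact Prysm constants.

package main

import "fmt"

func main() {
	// Build one gossip topic per blob subnet, the way the Deneb branch above
	// iterates over BlobsidecarSubnetCount subnets for a given fork digest.
	const blobSubnetTopicFormat = "/eth2/%x/blob_sidecar_%d/ssz_snappy" // stand-in format
	digest := [4]byte{0xf5, 0xa5, 0xfd, 0x42}
	subnetCount := uint64(4)
	for i := uint64(0); i < subnetCount; i++ {
		fmt.Printf(blobSubnetTopicFormat+"\n", digest, i)
	}
}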
@@ -300,7 +312,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
||||
|
||||
// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
|
||||
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
|
||||
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
|
||||
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte, subnetCount uint64) {
|
||||
genRoot := s.cfg.clock.GenesisValidatorsRoot()
|
||||
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||
if err != nil {
|
||||
@@ -336,7 +348,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
||||
if !valid {
|
||||
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||
// Unsubscribes from all our current subnets.
|
||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
||||
for i := uint64(0); i < subnetCount; i++ {
|
||||
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
s.unSubscribeFromTopic(fullTopic)
|
||||
}
|
||||
@@ -344,7 +356,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
||||
return
|
||||
}
|
||||
// Check every slot that there are enough peers
|
||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
||||
for i := uint64(0); i < subnetCount; i++ {
|
||||
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
||||
|
||||
24
beacon-chain/sync/subscriber_blob_sidecar.go
Normal file
24
beacon-chain/sync/subscriber_blob_sidecar.go
Normal file
@@ -0,0 +1,24 @@
package sync

import (
	"context"
	"fmt"

	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"google.golang.org/protobuf/proto"
)

func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
	b, ok := msg.(*eth.SignedBlobSidecar)
	if !ok {
		return fmt.Errorf("message was not type *eth.SignedBlobSidecar, type=%T", msg)
	}

	log.WithFields(blobFields(b.Message)).Debug("Received blob sidecar")

	s.setSeenBlobIndex(b.Message.Blob, b.Message.Index)

	// TODO: Store blobs in cache. Will be addressed in subsequent PR.

	return nil
}
@@ -335,7 +335,7 @@ func TestStaticSubnets(t *testing.T) {
|
||||
r.subscribeStaticWithSubnets(defaultTopic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
// no-op
|
||||
return nil
|
||||
}, d)
|
||||
}, d, params.BeaconNetworkConfig().AttestationSubnetCount)
|
||||
topics := r.cfg.p2p.PubSub().GetTopics()
|
||||
if uint64(len(topics)) != params.BeaconNetworkConfig().AttestationSubnetCount {
|
||||
t.Errorf("Wanted the number of subnet topics registered to be %d but got %d", params.BeaconNetworkConfig().AttestationSubnetCount, len(topics))
|
||||
|
||||
190
beacon-chain/sync/validate_blob.go
Normal file
190
beacon-chain/sync/validate_blob.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, errInvalidTopic
|
||||
}
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to decode message")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
sBlob, ok := m.(*eth.SignedBlobSidecar)
|
||||
if !ok {
|
||||
log.WithField("message", m).Error("Message is not of type *eth.SignedBlobSidecar")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
blob := sBlob.Message
|
||||
|
||||
// [REJECT] The sidecar is for the correct topic -- i.e. sidecar.index matches the topic {index}.
|
||||
want := fmt.Sprintf("blob_sidecar_%d", blob.Index)
|
||||
if !strings.Contains(*msg.Topic, want) {
|
||||
log.WithFields(blobFields(blob)).Debug("Sidecar blob does not match topic")
|
||||
return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) --
|
||||
// i.e. validate that sidecar.slot <= current_slot (a client MAY queue future blocks for processing at the appropriate slot).
|
||||
genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
|
||||
if err := slots.VerifyTime(genesisTime, blob.Slot, earlyBlockProcessingTolerance); err != nil {
|
||||
log.WithError(err).WithFields(blobFields(blob)).Debug("Ignored blob: too far into future")
|
||||
return pubsub.ValidationIgnore, errors.Wrap(err, "blob too far into future")
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar is from a slot greater than the latest finalized slot --
|
||||
// i.e. validate that sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||
startSlot, err := slots.EpochStart(s.cfg.chain.FinalizedCheckpt().Epoch)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if startSlot >= blob.Slot {
|
||||
err := fmt.Errorf("finalized slot %d greater or equal to blob slot %d", startSlot, blob.Slot)
|
||||
log.WithFields(blobFields(blob)).Debug(err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar's block's parent (defined by sidecar.block_parent_root) has been seen (via both gossip and non-gossip sources)
|
||||
parentRoot := bytesutil.ToBytes32(blob.BlockParentRoot)
|
||||
if !s.cfg.chain.HasBlock(ctx, parentRoot) {
|
||||
if err := s.addPendingBlobToCache(sBlob); err != nil {
|
||||
log.WithError(err).WithFields(blobFields(blob)).Error("Failed to add blob to cache")
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
log.WithFields(blobFields(blob)).Debug("Ignored blob: parent block not found")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's block's parent (defined by sidecar.block_parent_root) passes validation.
|
||||
parentSlot, err := s.cfg.chain.RecentBlockSlot(parentRoot)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
// [REJECT] The sidecar is from a higher slot than the sidecar's block's parent (defined by sidecar.block_parent_root).
|
||||
if parentSlot >= blob.Slot {
|
||||
err := fmt.Errorf("parent block slot %d greater or equal to blob slot %d", parentSlot, blob.Slot)
|
||||
log.WithFields(blobFields(blob)).Debug(err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// [REJECT] The proposer signature, signed_blob_sidecar.signature,
|
||||
// is valid with respect to the sidecar.proposer_index pubkey.
|
||||
parentState, err := s.cfg.stateGen.StateByRoot(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if err := verifyBlobSignature(parentState, sBlob); err != nil {
|
||||
log.WithError(err).WithFields(blobFields(blob)).Debug("Failed to verify blob signature")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar is the only sidecar with valid signature received for the tuple (sidecar.block_root, sidecar.index).
|
||||
if s.hasSeenBlobIndex(blob.BlockRoot, blob.Index) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar is proposed by the expected proposer_index for the block's slot in the context of the current shuffling (defined by block_parent_root/slot)
|
||||
parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], blob.Slot)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if blob.ProposerIndex != idx {
|
||||
err := fmt.Errorf("expected proposer index %d, got %d", idx, blob.ProposerIndex)
|
||||
log.WithFields(blobFields(blob)).Debug(err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
msg.ValidatorData = sBlob
|
||||
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func verifyBlobSignature(st state.BeaconState, blob *eth.SignedBlobSidecar) error {
|
||||
currentEpoch := slots.ToEpoch(blob.Message.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBlobSidecar, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := st.ValidatorAtIndex(blob.Message.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb, err := bls.PublicKeyFromBytes(proposer.PublicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(blob.Signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sr, err := signing.ComputeSigningRoot(blob.Message, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sig.Verify(pb, sr[:]) {
|
||||
return signing.ErrSigFailedToVerify
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns true if the blob with the same root and index has been seen before.
|
||||
func (s *Service) hasSeenBlobIndex(root []byte, index uint64) bool {
|
||||
s.seenBlobLock.RLock()
|
||||
defer s.seenBlobLock.RUnlock()
|
||||
b := append(root, bytesutil.Bytes32(index)...)
|
||||
_, seen := s.seenBlobCache.Get(string(b))
|
||||
return seen
|
||||
}
|
||||
|
||||
// Set blob index and root as seen.
|
||||
func (s *Service) setSeenBlobIndex(root []byte, index uint64) {
|
||||
s.seenBlobLock.Lock()
|
||||
defer s.seenBlobLock.Unlock()
|
||||
b := append(root, bytesutil.Bytes32(index)...)
|
||||
s.seenBlobCache.Add(string(b), true)
|
||||
}
|
||||
|
||||
func blobFields(b *eth.BlobSidecar) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"proposerIndex": b.ProposerIndex,
|
||||
"blockRoot": fmt.Sprintf("%#x", b.BlockRoot),
|
||||
"index": b.Index,
|
||||
}
|
||||
}
|
||||
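As a standalone illustration of two checks in validate_blob.go above, the sketch below builds a (root, index) seen-cache key and performs the topic/index match using only the standard library. The key layout, a 32-byte root followed by an 8-byte little-endian index, and the helper names are assumptions made for this example; the diff builds its key with bytesutil.Bytes32 and stores it in an LRU cache.

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// seenKey joins a 32-byte block root and a blob index into one cache key,
// mirroring the (root, index) keying of hasSeenBlobIndex/setSeenBlobIndex above.
func seenKey(root [32]byte, index uint64) string {
	b := make([]byte, 0, 40)
	b = append(b, root[:]...)
	idx := make([]byte, 8)
	binary.LittleEndian.PutUint64(idx, index)
	return string(append(b, idx...))
}

// topicMatchesIndex mirrors the [REJECT] rule that the sidecar index must match
// the {index} embedded in the gossip topic name.
func topicMatchesIndex(topic string, index uint64) bool {
	return strings.Contains(topic, fmt.Sprintf("blob_sidecar_%d", index))
}

func main() {
	seen := map[string]bool{}
	var root [32]byte
	k := seenKey(root, 2)
	fmt.Println("seen before:", seen[k]) // false
	seen[k] = true
	fmt.Println("seen after:", seen[k]) // true
	fmt.Println(topicMatchesIndex("/eth2/f5a5fd42/blob_sidecar_2/ssz_snappy", 2)) // true
}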
415
beacon-chain/sync/validate_blob_test.go
Normal file
415
beacon-chain/sync/validate_blob_test.go
Normal file
@@ -0,0 +1,415 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestValidateBlob_FromSelf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
s := &Service{cfg: &config{p2p: p}}
|
||||
result, err := s.validateBlob(ctx, s.cfg.p2p.PeerID(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationAccept)
|
||||
}
|
||||
|
||||
func TestValidateBlob_InitSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{IsSyncing: true}}}
|
||||
result, err := s.validateBlob(ctx, "", nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationIgnore)
|
||||
}
|
||||
|
||||
func TestValidateBlob_InvalidTopic(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{},
|
||||
})
|
||||
require.ErrorIs(t, errInvalidTopic, err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_InvalidMessageType(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
msg := util.NewBeaconBlock()
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestToTopic(topic, digest)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorIs(t, errWrongMessage, err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_InvalidTopicIndex(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
msg := util.NewBlobsidecar()
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 1)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorContains(t, "/eth2/f5a5fd42/blob_sidecar_1", err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_OlderThanFinalizedEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{Epoch: 1}}
|
||||
s := &Service{cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorContains(t, "finalized slot 32 greater or equal to blob slot 1", err)
|
||||
require.Equal(t, result, pubsub.ValidationIgnore)
|
||||
}
|
||||
|
||||
func TestValidateBlob_UnknownParentBlock(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}}
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationIgnore)
|
||||
require.LogsContain(t, hook, "Ignored blob: parent block not found")
|
||||
|
||||
pendingBlobs := s.pendingBlobsInCache(b.Message.Slot)
|
||||
require.Equal(t, 1, len(pendingBlobs))
|
||||
}
|
||||
|
||||
func TestValidateBlob_HigherThanParentSlot(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db}
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
chainService.BlockSlot = chainService.CurrentSlot() + 1
|
||||
bb := util.NewBeaconBlock()
|
||||
bb.Block.Slot = b.Message.Slot
|
||||
signedBb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBb))
|
||||
r, err := signedBb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
b.Message.BlockParentRoot = r[:]
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorContains(t, "parent block slot 1 greater or equal to blob slot 1", err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_InvalidProposerSignature(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db}
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
stateGen: stateGen,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
sk, err := bls.SecretKeyFromBytes(bytesutil.PadTo([]byte("sk"), 32))
|
||||
require.NoError(t, err)
|
||||
b.Signature = sk.Sign([]byte("data")).Marshal()
|
||||
|
||||
bb := util.NewBeaconBlock()
|
||||
signedBb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBb))
|
||||
r, err := signedBb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
b.Message.BlockParentRoot = r[:]
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_AlreadySeenInCache(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db}
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
seenBlobCache: lruwrpr.New(10),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
stateGen: stateGen,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
|
||||
bb := util.NewBeaconBlock()
|
||||
signedBb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBb))
|
||||
r, err := signedBb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, r))
|
||||
|
||||
b.Message.BlockParentRoot = r[:]
|
||||
b.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, b.Message, params.BeaconConfig().DomainBlobSidecar, privKeys[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
|
||||
s.setSeenBlobIndex(bytesutil.PadTo([]byte{}, 32), 0)
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationIgnore)
|
||||
}
|
||||
|
||||
func TestValidateBlob_IncorrectProposerIndex(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db}
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
seenBlobCache: lruwrpr.New(10),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
stateGen: stateGen,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
|
||||
bb := util.NewBeaconBlock()
|
||||
signedBb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBb))
|
||||
r, err := signedBb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, r))
|
||||
|
||||
b.Message.BlockParentRoot = r[:]
|
||||
b.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, b.Message, params.BeaconConfig().DomainBlobSidecar, privKeys[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorContains(t, "expected proposer index 21, got 0", err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidateBlob_EverythingPasses(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db}
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
s := &Service{
|
||||
slotToPendingBlobs: gcache.New(time.Second, 2*time.Second),
|
||||
seenBlobCache: lruwrpr.New(10),
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{},
|
||||
chain: chainService,
|
||||
stateGen: stateGen,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
b := util.NewBlobsidecar()
|
||||
b.Message.Slot = chainService.CurrentSlot() + 1
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
|
||||
bb := util.NewBeaconBlock()
|
||||
signedBb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBb))
|
||||
r, err := signedBb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, r))
|
||||
|
||||
b.Message.BlockParentRoot = r[:]
|
||||
b.Message.ProposerIndex = 21
|
||||
b.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, b.Message, params.BeaconConfig().DomainBlobSidecar, privKeys[21])
|
||||
require.NoError(t, err)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(b)]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestAndIndexToTopic(topic, digest, 0)
|
||||
|
||||
result, err := s.validateBlob(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationAccept)
|
||||
}
|
||||
@@ -263,12 +263,16 @@ func generateGenesis(ctx context.Context) (state.BeaconState, error) {
|
||||
// set timestamps for genesis and shanghai fork
|
||||
gen.Timestamp = f.GenesisTime
|
||||
gen.Config.ShanghaiTime = interop.GethShanghaiTime(f.GenesisTime, params.BeaconConfig())
|
||||
//gen.Config.CancunTime = interop.GethCancunTime(f.GenesisTime, params.BeaconConfig())
|
||||
gen.Config.CancunTime = interop.GethCancunTime(f.GenesisTime, params.BeaconConfig())
|
||||
log.
|
||||
WithField("shanghai", gen.Config.ShanghaiTime).
|
||||
WithField("cancun", gen.Config.CancunTime).
|
||||
Info("setting fork geth times")
|
||||
if v > version.Altair {
|
||||
// set ttd to zero so EL goes post-merge immediately
|
||||
gen.Config.TerminalTotalDifficulty = big.NewInt(0)
|
||||
gen.Config.TerminalTotalDifficultyPassed = true
|
||||
}
|
||||
} else {
|
||||
gen = interop.GethTestnetGenesis(f.GenesisTime, params.BeaconConfig())
|
||||
|
||||
@@ -26,4 +26,7 @@ const (
	SyncCommitteeAggregationBytesLength = 16 // SyncCommitteeAggregationBytesLength defines the length of sync committee aggregate bytes.
	SyncAggregateSyncCommitteeBytesLength = 64 // SyncAggregateSyncCommitteeBytesLength defines the length of sync committee bytes in a sync aggregate.
	MaxWithdrawalsPerPayload = 16 // MaxWithdrawalsPerPayload defines the maximum number of withdrawals that can be included in a payload.
	MaxBlobsPerBlock = 6 // MaxBlobsPerBlock defines the maximum number of blobs that can be included in a block under the consensus rules.
	MaxBlobCommitmentsPerBlock = 4096 // MaxBlobCommitmentsPerBlock defines the theoretical limit on the number of blob commitments that can be included in a block.
	BlobLength = 131072 // BlobLength defines the byte length of a blob.
)

@@ -26,4 +26,7 @@ const (
	SyncCommitteeAggregationBytesLength = 1 // SyncCommitteeAggregationBytesLength defines the length of sync committee aggregate bytes.
	SyncAggregateSyncCommitteeBytesLength = 4 // SyncAggregateSyncCommitteeBytesLength defines the length of sync committee bytes in a sync aggregate.
	MaxWithdrawalsPerPayload = 4 // MaxWithdrawalsPerPayload defines the maximum number of withdrawals that can be included in a payload.
	MaxBlobsPerBlock = 6 // MaxBlobsPerBlock defines the maximum number of blobs that can be included in a block under the consensus rules.
	MaxBlobCommitmentsPerBlock = 16 // MaxBlobCommitmentsPerBlock defines the theoretical limit on the number of blob commitments that can be included in a block.
	BlobLength = 4 // BlobLength defines the byte length of a blob.
)

@@ -28,6 +28,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//params:go_default_library",
|
||||
"@com_github_mohae_deepcopy//:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
// BeaconChainConfig contains constant configs for node to participate in beacon chain.
|
||||
@@ -104,7 +105,7 @@ type BeaconChainConfig struct {
|
||||
MaxVoluntaryExits uint64 `yaml:"MAX_VOLUNTARY_EXITS" spec:"true"` // MaxVoluntaryExits defines the maximum number of validator exits in a block.
|
||||
MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
|
||||
MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
|
||||
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` //MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
|
||||
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
|
||||
|
||||
// BLS domain values.
|
||||
DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
|
||||
@@ -120,6 +121,7 @@ type BeaconChainConfig struct {
|
||||
DomainApplicationMask [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
|
||||
DomainApplicationBuilder [4]byte `yaml:"DOMAIN_APPLICATION_BUILDER" spec:"true"` // DomainApplicationBuilder defines the BLS signature domain for application builder.
|
||||
DomainBLSToExecutionChange [4]byte `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE" spec:"true"` // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
|
||||
DomainBlobSidecar [4]byte `yaml:"DOMAIN_BLOB_SIDECAR" spec:"true"` // DomainBlobSidecar defines the BLS signature domain for blob sidecar.
|
||||
|
||||
// Prysm constants.
|
||||
GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.
|
||||
@@ -154,6 +156,8 @@ type BeaconChainConfig struct {
|
||||
BellatrixForkEpoch primitives.Epoch `yaml:"BELLATRIX_FORK_EPOCH" spec:"true"` // BellatrixForkEpoch is used to represent the assigned fork epoch for bellatrix.
|
||||
CapellaForkVersion []byte `yaml:"CAPELLA_FORK_VERSION" spec:"true"` // CapellaForkVersion is used to represent the fork version for capella.
|
||||
CapellaForkEpoch primitives.Epoch `yaml:"CAPELLA_FORK_EPOCH" spec:"true"` // CapellaForkEpoch is used to represent the assigned fork epoch for capella.
|
||||
DenebForkVersion []byte `yaml:"DENEB_FORK_VERSION" spec:"true"` // DenebForkVersion is used to represent the fork version for deneb.
|
||||
DenebForkEpoch primitives.Epoch `yaml:"DENEB_FORK_EPOCH" spec:"true"` // DenebForkEpoch is used to represent the assigned fork epoch for deneb.
|
||||
|
||||
ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
|
||||
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.
|
||||
@@ -227,18 +231,31 @@ func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byt
|
||||
fvs[bytesutil.ToBytes4(b.AltairForkVersion)] = b.AltairForkEpoch
|
||||
fvs[bytesutil.ToBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch
|
||||
fvs[bytesutil.ToBytes4(b.CapellaForkVersion)] = b.CapellaForkEpoch
|
||||
fvs[bytesutil.ToBytes4(b.DenebForkVersion)] = b.DenebForkEpoch
|
||||
return fvs
|
||||
}
|
||||
|
||||
func configForkNames(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]string {
|
||||
cfv := ConfigForkVersions(b)
|
||||
fvn := map[[fieldparams.VersionLength]byte]string{}
|
||||
fvn[bytesutil.ToBytes4(b.GenesisForkVersion)] = "phase0"
|
||||
fvn[bytesutil.ToBytes4(b.AltairForkVersion)] = "altair"
|
||||
fvn[bytesutil.ToBytes4(b.BellatrixForkVersion)] = "bellatrix"
|
||||
fvn[bytesutil.ToBytes4(b.CapellaForkVersion)] = "capella"
|
||||
for k, v := range cfv {
|
||||
fvn[k] = version.String(v)
|
||||
}
|
||||
return fvn
|
||||
}
|
||||
|
||||
// ConfigForkVersions returns a mapping between a fork version param and the version identifier
|
||||
// from the runtime/version package.
|
||||
func ConfigForkVersions(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]int {
|
||||
return map[[fieldparams.VersionLength]byte]int{
|
||||
bytesutil.ToBytes4(b.GenesisForkVersion): version.Phase0,
|
||||
bytesutil.ToBytes4(b.AltairForkVersion): version.Altair,
|
||||
bytesutil.ToBytes4(b.BellatrixForkVersion): version.Bellatrix,
|
||||
bytesutil.ToBytes4(b.CapellaForkVersion): version.Capella,
|
||||
bytesutil.ToBytes4(b.DenebForkVersion): version.Deneb,
|
||||
}
|
||||
}
|
||||
|
||||
// Eth1DataVotesLength returns the maximum length of the votes on the Eth1 data,
|
||||
// computed from the parameters in BeaconChainConfig.
|
||||
func (b *BeaconChainConfig) Eth1DataVotesLength() uint64 {
|
||||
|
||||
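The hunk above adds a ConfigForkVersions map and derives the human-readable fork names from it rather than listing every fork by hand, so wiring in Deneb only requires new map entries. A minimal sketch of that pattern, with stand-ins for the Prysm version package and the fork-version bytes:

package main

import "fmt"

// versionName is a stand-in for runtime/version.String in the diff.
func versionName(v int) string {
	names := []string{"phase0", "altair", "bellatrix", "capella", "deneb"}
	if v < 0 || v >= len(names) {
		return "unknown"
	}
	return names[v]
}

func main() {
	// forkVersions is a stand-in for ConfigForkVersions: fork version bytes -> version identifier.
	forkVersions := map[[4]byte]int{
		{0, 0, 0, 0}: 0, // phase0
		{1, 0, 0, 0}: 1, // altair
		{2, 0, 0, 0}: 2, // bellatrix
		{3, 0, 0, 0}: 3, // capella
		{4, 0, 0, 0}: 4, // deneb
	}

	// Derive the names from the single source of truth instead of hard-coding
	// one assignment per fork, mirroring the configForkNames change above.
	forkNames := make(map[[4]byte]string, len(forkVersions))
	for k, v := range forkVersions {
		forkNames[k] = versionName(v)
	}
	fmt.Println(forkNames[[4]byte{4, 0, 0, 0}]) // deneb
}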
@@ -1,8 +1,11 @@
|
||||
package params
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
var configs *configset
|
||||
@@ -69,7 +72,12 @@ func (r *configset) add(c *BeaconChainConfig) error {
|
||||
c.InitializeForkSchedule()
|
||||
for v := range c.ForkVersionSchedule {
|
||||
if n, exists := r.versionToName[v]; exists {
|
||||
return errors.Wrapf(errCollisionFork, "config name=%s conflicts with existing config named=%s", name, n)
|
||||
// determine the fork name for the colliding version
|
||||
cfv := ConfigForkVersions(c)
|
||||
versionId := cfv[v]
|
||||
msg := fmt.Sprintf("version %#x for fork %s in config %s conflicts with existing config named=%s",
|
||||
v, version.String(versionId), name, n)
|
||||
return errors.Wrap(errCollisionFork, msg)
|
||||
}
|
||||
r.versionToName[v] = name
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ func InteropConfig() *BeaconChainConfig {
|
||||
c.AltairForkVersion = []byte{1, 0, 0, 235}
|
||||
c.BellatrixForkVersion = []byte{2, 0, 0, 235}
|
||||
c.CapellaForkVersion = []byte{3, 0, 0, 235}
|
||||
c.DenebForkVersion = []byte{4, 0, 0, 235}
|
||||
|
||||
c.InitializeForkSchedule()
|
||||
return c
|
||||
|
||||
@@ -207,6 +207,8 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
|
||||
fmt.Sprintf("TERMINAL_BLOCK_HASH: %#x", cfg.TerminalBlockHash),
|
||||
fmt.Sprintf("TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: %d", cfg.TerminalBlockHashActivationEpoch),
|
||||
fmt.Sprintf("DEPOSIT_CONTRACT_ADDRESS: %s", cfg.DepositContractAddress),
|
||||
fmt.Sprintf("DENEB_FORK_EPOCH: %d", cfg.DenebForkEpoch),
|
||||
fmt.Sprintf("DENEB_FORK_VERSION: %#x", cfg.DenebForkVersion),
|
||||
}
|
||||
|
||||
yamlFile := []byte(strings.Join(lines, "\n"))
|
||||
|
||||
@@ -20,10 +20,9 @@ import (
|
||||
|
||||
// Variables defined in the placeholderFields will not be tested in `TestLoadConfigFile`.
|
||||
// These are variables that we don't use in Prysm (e.g. future hard forks, light client, etc.).
|
||||
var placeholderFields = []string{"UPDATE_TIMEOUT", "DENEB_FORK_EPOCH", "DENEB_FORK_VERSION",
|
||||
"ATTESTATION_SUBNET_EXTRA_BITS", "RESP_TIMEOUT", "MAX_REQUEST_BLOCKS", "EPOCHS_PER_SUBNET_SUBSCRIPTION",
|
||||
"EIP6110_FORK_EPOCH", "MESSAGE_DOMAIN_INVALID_SNAPPY", "MIN_EPOCHS_FOR_BLOCK_REQUESTS", "MAXIMUM_GOSSIP_CLOCK_DISPARITY",
|
||||
"MESSAGE_DOMAIN_VALID_SNAPPY", "GOSSIP_MAX_SIZE", "SUBNETS_PER_NODE", "ATTESTATION_SUBNET_COUNT",
|
||||
var placeholderFields = []string{"UPDATE_TIMEOUT", "ATTESTATION_SUBNET_EXTRA_BITS", "RESP_TIMEOUT", "MAX_REQUEST_BLOCKS", "EPOCHS_PER_SUBNET_SUBSCRIPTION",
|
||||
"EIP6110_FORK_EPOCH", "MESSAGE_DOMAIN_INVALID_SNAPPY", "MIN_EPOCHS_FOR_BLOCK_REQUESTS", "MAXIMUM_GOSSIP_CLOCK_DISPARITY", "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
|
||||
"MESSAGE_DOMAIN_VALID_SNAPPY", "GOSSIP_MAX_SIZE", "SUBNETS_PER_NODE", "ATTESTATION_SUBNET_COUNT", "MAX_REQUEST_BLOCKS_DENEB", "MAX_REQUEST_BLOB_SIDECARS",
|
||||
"MAX_CHUNK_SIZE", "ATTESTATION_PROPAGATION_SLOT_RANGE", "ATTESTATION_SUBNET_PREFIX_BITS", "EIP6110_FORK_VERSION", "TTFB_TIMEOUT"}
|
||||
|
||||
func assertEqualConfigs(t *testing.T, name string, fields []string, expected, actual *params.BeaconChainConfig) {
|
||||
|
||||
@@ -26,23 +26,27 @@ const (
|
||||
)
|
||||
|
||||
var mainnetNetworkConfig = &NetworkConfig{
|
||||
GossipMaxSize: 1 << 20, // 1 MiB
|
||||
GossipMaxSizeBellatrix: 10 * 1 << 20, // 10 MiB
|
||||
MaxChunkSize: 1 << 20, // 1 MiB
|
||||
MaxChunkSizeBellatrix: 10 * 1 << 20, // 10 MiB
|
||||
AttestationSubnetCount: 64,
|
||||
AttestationPropagationSlotRange: 32,
|
||||
MaxRequestBlocks: 1 << 10, // 1024
|
||||
TtfbTimeout: 5 * time.Second,
|
||||
RespTimeout: 10 * time.Second,
|
||||
MaximumGossipClockDisparity: 500 * time.Millisecond,
|
||||
MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00},
|
||||
MessageDomainValidSnappy: [4]byte{01, 00, 00, 00},
|
||||
ETH2Key: "eth2",
|
||||
AttSubnetKey: "attnets",
|
||||
SyncCommsSubnetKey: "syncnets",
|
||||
MinimumPeersInSubnetSearch: 20,
|
||||
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
|
||||
GossipMaxSize: 1 << 20, // 1 MiB
|
||||
GossipMaxSizeBellatrix: 10 * 1 << 20, // 10 MiB
|
||||
MaxChunkSize: 1 << 20, // 1 MiB
|
||||
MaxChunkSizeBellatrix: 10 * 1 << 20, // 10 MiB
|
||||
AttestationSubnetCount: 64,
|
||||
BlobsidecarSubnetCount: 4,
|
||||
AttestationPropagationSlotRange: 32,
|
||||
MaxRequestBlocks: 1 << 10, // 1024
|
||||
TtfbTimeout: 5 * time.Second,
|
||||
RespTimeout: 10 * time.Second,
|
||||
MaximumGossipClockDisparity: 500 * time.Millisecond,
|
||||
MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00},
|
||||
MessageDomainValidSnappy: [4]byte{01, 00, 00, 00},
|
||||
ETH2Key: "eth2",
|
||||
AttSubnetKey: "attnets",
|
||||
SyncCommsSubnetKey: "syncnets",
|
||||
MinimumPeersInSubnetSearch: 20,
|
||||
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
|
||||
MinEpochsForBlobsSidecarsRequest: 4096,
|
||||
MaxRequestBlobSidecars: 768,
|
||||
MaxRequestBlocksDeneb: 128,
|
||||
BootstrapNodes: []string{
|
||||
// Teku team's bootnode
|
||||
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA",
|
||||
@@ -174,6 +178,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
DomainApplicationMask: bytesutil.Uint32ToBytes4(0x00000001),
|
||||
DomainApplicationBuilder: bytesutil.Uint32ToBytes4(0x00000001),
|
||||
DomainBLSToExecutionChange: bytesutil.Uint32ToBytes4(0x0A000000),
|
||||
DomainBlobSidecar: bytesutil.Uint32ToBytes4(0x0B000000),
|
||||
|
||||
// Prysm constants.
|
||||
GweiPerEth: 1000000000,
|
||||
@@ -212,6 +217,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
BellatrixForkEpoch: mainnetBellatrixForkEpoch,
|
||||
CapellaForkVersion: []byte{3, 0, 0, 0},
|
||||
CapellaForkEpoch: 194048,
|
||||
DenebForkVersion: []byte{4, 0, 0, 0},
|
||||
DenebForkEpoch: math.MaxUint64,
|
||||
|
||||
// New values introduced in Altair hard fork 1.
|
||||
// Participation flag indices.
|
||||
|
||||
@@ -88,6 +88,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.BellatrixForkEpoch = math.MaxUint64
|
||||
minimalConfig.CapellaForkVersion = []byte{3, 0, 0, 1}
|
||||
minimalConfig.CapellaForkEpoch = math.MaxUint64
|
||||
minimalConfig.DenebForkVersion = []byte{4, 0, 0, 1}
|
||||
|
||||
minimalConfig.SyncCommitteeSize = 32
|
||||
minimalConfig.InactivityScoreBias = 4
|
||||
|
||||
@@ -9,18 +9,22 @@ import (
|
||||
|
||||
// NetworkConfig defines the spec based network parameters.
|
||||
type NetworkConfig struct {
|
||||
GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.
|
||||
GossipMaxSizeBellatrix uint64 `yaml:"GOSSIP_MAX_SIZE_BELLATRIX"` // GossipMaxSizeBellatrix is the maximum allowed size of uncompressed gossip messages after the bellatrix epoch.
|
||||
MaxChunkSize uint64 `yaml:"MAX_CHUNK_SIZE"` // MaxChunkSize is the maximum allowed size of uncompressed req/resp chunked responses.
|
||||
MaxChunkSizeBellatrix uint64 `yaml:"MAX_CHUNK_SIZE_BELLATRIX"` // MaxChunkSizeBellatrix is the maximum allowed size of uncompressed req/resp chunked responses after the bellatrix epoch.
|
||||
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.
|
||||
AttestationPropagationSlotRange primitives.Slot `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE"` // AttestationPropagationSlotRange is the maximum number of slots during which an attestation can be propagated.
|
||||
MaxRequestBlocks uint64 `yaml:"MAX_REQUEST_BLOCKS"` // MaxRequestBlocks is the maximum number of blocks in a single request.
|
||||
TtfbTimeout time.Duration `yaml:"TTFB_TIMEOUT"` // TtfbTimeout is the maximum time to wait for first byte of request response (time-to-first-byte).
|
||||
RespTimeout time.Duration `yaml:"RESP_TIMEOUT"` // RespTimeout is the maximum time for complete response transfer.
|
||||
MaximumGossipClockDisparity time.Duration `yaml:"MAXIMUM_GOSSIP_CLOCK_DISPARITY"` // MaximumGossipClockDisparity is the maximum milliseconds of clock disparity assumed between honest nodes.
|
||||
MessageDomainInvalidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_INVALID_SNAPPY"` // MessageDomainInvalidSnappy is the 4-byte domain for gossip message-id isolation of invalid snappy messages.
|
||||
MessageDomainValidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_VALID_SNAPPY"` // MessageDomainValidSnappy is the 4-byte domain for gossip message-id isolation of valid snappy messages.
|
||||
GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.
|
||||
GossipMaxSizeBellatrix uint64 `yaml:"GOSSIP_MAX_SIZE_BELLATRIX"` // GossipMaxSizeBellatrix is the maximum allowed size of uncompressed gossip messages after the bellatrix epoch.
|
||||
MaxChunkSize uint64 `yaml:"MAX_CHUNK_SIZE"` // MaxChunkSize is the maximum allowed size of uncompressed req/resp chunked responses.
|
||||
MaxChunkSizeBellatrix uint64 `yaml:"MAX_CHUNK_SIZE_BELLATRIX"` // MaxChunkSizeBellatrix is the maximum allowed size of uncompressed req/resp chunked responses after the bellatrix epoch.
|
||||
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.
|
||||
BlobsidecarSubnetCount uint64 `yaml:"BLOBSIDECAR_SUBNET_COUNT"` // BlobsidecarSubnetCount is the number of blobsidecar subnets used in the gossipsub protocol.
|
||||
AttestationPropagationSlotRange primitives.Slot `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE"` // AttestationPropagationSlotRange is the maximum number of slots during which an attestation can be propagated.
|
||||
MaxRequestBlocks uint64 `yaml:"MAX_REQUEST_BLOCKS"` // MaxRequestBlocks is the maximum number of blocks in a single request.
|
||||
TtfbTimeout time.Duration `yaml:"TTFB_TIMEOUT"` // TtfbTimeout is the maximum time to wait for first byte of request response (time-to-first-byte).
|
||||
RespTimeout time.Duration `yaml:"RESP_TIMEOUT"` // RespTimeout is the maximum time for complete response transfer.
|
||||
MaximumGossipClockDisparity time.Duration `yaml:"MAXIMUM_GOSSIP_CLOCK_DISPARITY"` // MaximumGossipClockDisparity is the maximum milliseconds of clock disparity assumed between honest nodes.
|
||||
MessageDomainInvalidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_INVALID_SNAPPY"` // MessageDomainInvalidSnappy is the 4-byte domain for gossip message-id isolation of invalid snappy messages.
|
||||
MessageDomainValidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_VALID_SNAPPY"` // MessageDomainValidSnappy is the 4-byte domain for gossip message-id isolation of valid snappy messages.
|
||||
MinEpochsForBlobsSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUEST"` // MinEpochsForBlobsSidecarsRequest is the minimum number of epochs the node will keep the blobs for.
|
||||
MaxRequestBlobSidecars uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS"` // MaxRequestBlobSidecars is the maximum number of blobs to request in a single request.
|
||||
MaxRequestBlocksDeneb uint64 `yaml:"MAX_REQUEST_BLOCKS_DENEB"` // MaxRequestBlocksDeneb is the maximum number of blocks in a single request after the deneb epoch.
|
||||
|
||||
// DiscoveryV5 Config
|
||||
ETH2Key string // ETH2Key is the ENR key of the Ethereum consensus object in an enr.
|
||||
|
||||
@@ -48,6 +48,7 @@ func E2ETestConfig() *BeaconChainConfig {
|
||||
e2eConfig.AltairForkVersion = []byte{1, 0, 0, 253}
|
||||
e2eConfig.BellatrixForkVersion = []byte{2, 0, 0, 253}
|
||||
e2eConfig.CapellaForkVersion = []byte{3, 0, 0, 253}
|
||||
e2eConfig.DenebForkVersion = []byte{4, 0, 0, 253}
|
||||
|
||||
e2eConfig.InitializeForkSchedule()
|
||||
return e2eConfig
|
||||
@@ -88,6 +89,7 @@ func E2EMainnetTestConfig() *BeaconChainConfig {
|
||||
e2eConfig.AltairForkVersion = []byte{1, 0, 0, 254}
|
||||
e2eConfig.BellatrixForkVersion = []byte{2, 0, 0, 254}
|
||||
e2eConfig.CapellaForkVersion = []byte{3, 0, 0, 254}
|
||||
e2eConfig.DenebForkVersion = []byte{4, 0, 0, 254}
|
||||
|
||||
e2eConfig.InitializeForkSchedule()
|
||||
return e2eConfig
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.