Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-06 19:15:00 -05:00)

Compare commits: ro-payload...gloas/fork (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 0b48daf14a |  |
|  | dcb8dc2b75 |  |
@@ -1,4 +1,4 @@
version: v1.7.0-alpha.2
version: v1.7.0-alpha.1
style: full

specrefs:
@@ -146,6 +146,7 @@ exceptions:
- g1_lincomb#deneb
- hash_to_bls_field#deneb
- is_power_of_two#deneb
- multi_exp#deneb
- reverse_bits#deneb
- validate_kzg_g1#deneb
- verify_blob_kzg_proof#deneb
@@ -385,8 +386,7 @@ exceptions:
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
@@ -396,18 +396,13 @@ exceptions:
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas
WORKSPACE (10 changed lines)
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
        "minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
        "mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
    },
    version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
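The `flavors` and `integrity` values above appear to be SRI-style sha256 digests (Bazel's usual `integrity` format; treating the `flavors` map the same way is an assumption). A minimal Go sketch, not part of the repo, for recomputing such a digest from a downloaded archive when bumping the spec version by hand:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

// Prints an SRI-style "sha256-<base64>" digest for the file given as the
// first argument, e.g. a downloaded consensus-spec-tests tarball.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: sri <file>")
		os.Exit(1)
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	fmt.Printf("sha256-%s\n", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```

Run as `go run . mainnet.tar.gz` and compare the output against the value in WORKSPACE.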
@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------

type ExecutionPayloadBid struct {
	ParentBlockHash    string   `json:"parent_block_hash"`
	ParentBlockRoot    string   `json:"parent_block_root"`
	BlockHash          string   `json:"block_hash"`
	PrevRandao         string   `json:"prev_randao"`
	FeeRecipient       string   `json:"fee_recipient"`
	GasLimit           string   `json:"gas_limit"`
	BuilderIndex       string   `json:"builder_index"`
	Slot               string   `json:"slot"`
	Value              string   `json:"value"`
	ExecutionPayment   string   `json:"execution_payment"`
	BlobKzgCommitments []string `json:"blob_kzg_commitments"`
	ParentBlockHash        string `json:"parent_block_hash"`
	ParentBlockRoot        string `json:"parent_block_root"`
	BlockHash              string `json:"block_hash"`
	PrevRandao             string `json:"prev_randao"`
	FeeRecipient           string `json:"fee_recipient"`
	GasLimit               string `json:"gas_limit"`
	BuilderIndex           string `json:"builder_index"`
	Slot                   string `json:"slot"`
	Value                  string `json:"value"`
	ExecutionPayment       string `json:"execution_payment"`
	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
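To make the API change concrete: a minimal sketch of the JSON a bid now serializes to, using a local stand-in struct (hypothetical, carrying only a subset of the fields above) rather than the Prysm type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// bidJSON mirrors only the fields relevant to this change; the real struct
// above has more fields. The full commitment list is gone and a single
// 32-byte root takes its place.
type bidJSON struct {
	Slot                   string `json:"slot"`
	BuilderIndex           string `json:"builder_index"`
	Value                  string `json:"value"`
	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}

func main() {
	b := bidJSON{
		Slot:                   "12",
		BuilderIndex:           "7",
		Value:                  "500000",
		BlobKzgCommitmentsRoot: "0x" + strings.Repeat("ee", 32),
	}
	out, _ := json.MarshalIndent(b, "", "  ")
	fmt.Println(string(out))
}
```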

type SignedExecutionPayloadBid struct {
@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
|
||||
}
|
||||
|
||||
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
|
||||
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
|
||||
for i := range b.BlobKzgCommitments {
|
||||
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
|
||||
}
|
||||
return &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(b.BlockHash),
|
||||
PrevRandao: hexutil.Encode(b.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(b.FeeRecipient),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
Value: fmt.Sprintf("%d", b.Value),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(b.BlockHash),
|
||||
PrevRandao: hexutil.Encode(b.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(b.FeeRecipient),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
Value: fmt.Sprintf("%d", b.Value),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
|
||||
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayment")
|
||||
}
|
||||
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
|
||||
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
|
||||
}
|
||||
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
|
||||
for i, commitment := range b.BlobKzgCommitments {
|
||||
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
|
||||
}
|
||||
blobKzgCommitments[i] = kzg
|
||||
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
|
||||
}
|
||||
return ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentBlockHash,
|
||||
ParentBlockRoot: parentBlockRoot,
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: prevRandao,
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
BuilderIndex: primitives.BuilderIndex(builderIndex),
|
||||
Slot: primitives.Slot(slot),
|
||||
Value: primitives.Gwei(value),
|
||||
ExecutionPayment: primitives.Gwei(executionPayment),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ParentBlockHash: parentBlockHash,
|
||||
ParentBlockRoot: parentBlockRoot,
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: prevRandao,
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
BuilderIndex: primitives.BuilderIndex(builderIndex),
|
||||
Slot: primitives.Slot(slot),
|
||||
Value: primitives.Gwei(value),
|
||||
ExecutionPayment: primitives.Gwei(executionPayment),
|
||||
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
|
||||
}, nil
|
||||
}
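The decode path above now expects a fixed 32-byte root instead of a bounded commitment list. A self-contained sketch of length-checked hex decoding in the same spirit; `decodeHexWithLength` here is a stand-in, not the actual `bytesutil.DecodeHexWithLength` implementation:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// decodeHexWithLength rejects values that do not decode to exactly `want`
// bytes (32 for a commitments root), mirroring the check used above.
func decodeHexWithLength(s string, want int) ([]byte, error) {
	b, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
	if err != nil {
		return nil, err
	}
	if len(b) != want {
		return nil, fmt.Errorf("expected %d bytes, got %d", want, len(b))
	}
	return b, nil
}

func main() {
	root, err := decodeHexWithLength("0x"+strings.Repeat("ee", 32), 32)
	fmt.Println(len(root), err) // 32 <nil>

	_, err = decodeHexWithLength("0xabcd", 32)
	fmt.Println(err) // expected 32 bytes, got 2
}
```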
|
||||
|
||||
|
||||
@@ -101,7 +101,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
if len(lastValidHash) == 0 {
|
||||
lastValidHash = defaultLatestValidHash
|
||||
}
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
// this call is guaranteed to have the `headRoot` with its payload in forkchoice.
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), [32]byte(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not set head root to invalid")
|
||||
return nil, nil
|
||||
@@ -290,10 +291,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
|
||||
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
|
||||
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -232,7 +232,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
|
||||
// this call does not have the root in forkchoice yet.
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
@@ -992,9 +993,9 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
|
||||
if err != nil {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
|
||||
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.ParentHash()))
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ import (

// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// <spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
//     signed_bid = block.body.signed_execution_payload_bid
//     bid = signed_bid.message
@@ -37,12 +37,6 @@ import (
//     # Verify that the bid signature is valid
//     assert verify_execution_payload_bid_signature(state, signed_bid)
//
//     # Verify commitments are under limit
//     assert (
//         len(bid.blob_kzg_commitments)
//         <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
//     )
//
//     # Verify that the bid is for the current slot
//     assert bid.slot == block.slot
//     # Verify that the bid is for the right parent block
@@ -115,12 +109,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
		}
	}

	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
	commitmentCount := bid.BlobKzgCommitmentCount()
	if commitmentCount > uint64(maxBlobsPerBlock) {
		return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
	}

	if err := validateBidConsistency(st, bid, block); err != nil {
		return errors.Wrap(err, "bid consistency validation failed")
	}
@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
|
||||
return out
|
||||
}
|
||||
|
||||
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
|
||||
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
|
||||
if count > max {
|
||||
count = max
|
||||
}
|
||||
commitments := make([][]byte, count)
|
||||
for i := range commitments {
|
||||
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
|
||||
}
|
||||
return commitments
|
||||
}
|
||||
|
||||
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
|
||||
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
|
||||
count := max + 1
|
||||
commitments := make([][]byte, count)
|
||||
for i := range commitments {
|
||||
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
|
||||
}
|
||||
return commitments
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
|
||||
slot := primitives.Slot(12)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 500_000,
|
||||
ExecutionPayment: 1,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 500_000,
|
||||
ExecutionPayment: 1,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x04}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x04}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 25,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 25,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
// Use an invalid signature.
|
||||
invalidSig := [96]byte{1}
|
||||
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
|
||||
require.ErrorContains(t, "bid signature validation failed", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
|
||||
slot := primitives.Slot(9)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
|
||||
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
|
||||
pubKey := [48]byte{}
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
}
|
||||
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
err := ProcessExecutionPayloadBid(state, block)
|
||||
require.ErrorContains(t, "blob KZG commitments over max", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
|
||||
slot := primitives.Slot(10)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot + 1, // mismatch
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot + 1, // mismatch
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
|
||||
|
||||
parentRoot := bytes.Repeat([]byte{0x22}, 32)
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: parentRoot,
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: parentRoot,
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
|
||||
@@ -264,24 +264,24 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex

// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
// def is_valid_indexed_payload_attestation(
//     state: BeaconState, attestation: IndexedPayloadAttestation
//     state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
// ) -> bool:
//     """
//     Check if ``attestation`` is non-empty, has sorted indices, and has
//     Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
//     a valid aggregate signature.
//     """
//     # Verify indices are non-empty and sorted
//     indices = attestation.attesting_indices
//     indices = indexed_payload_attestation.attesting_indices
//     if len(indices) == 0 or not indices == sorted(indices):
//         return False
//
//     # Verify aggregate signature
//     pubkeys = [state.validators[i].pubkey for i in indices]
//     domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
//     signing_root = compute_signing_root(attestation.data, domain)
//     return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
//     domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
//     signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
//     return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
// </spec>
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
	indices := att.AttestingIndices
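The first condition in the spec comment above (attesting indices non-empty and sorted) maps directly onto the standard library; a minimal sketch, independent of the Prysm types:

```go
package main

import (
	"fmt"
	"slices"
)

// indicesValid mirrors `len(indices) == 0 or not indices == sorted(indices)`
// from the spec: the attestation is rejected unless indices are non-empty
// and already in ascending order.
func indicesValid(indices []uint64) bool {
	return len(indices) > 0 && slices.IsSorted(indices)
}

func main() {
	fmt.Println(indicesValid([]uint64{1, 2, 5})) // true
	fmt.Println(indicesValid(nil))               // false
	fmt.Println(indicesValid([]uint64{5, 1}))    // false
}
```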
@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: [][]byte{make([]byte, 48)},
|
||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
||||
},
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"gloas.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
@@ -32,6 +33,7 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
|
||||
@@ -31,7 +31,8 @@ func New() *ForkChoice {
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
slashedIndices: make(map[primitives.ValidatorIndex]bool),
|
||||
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
|
||||
}
|
||||
@@ -43,7 +44,7 @@ func New() *ForkChoice {
|
||||
|
||||
// NodeCount returns the current number of nodes in the Store.
|
||||
func (f *ForkChoice) NodeCount() int {
|
||||
return len(f.store.nodeByRoot)
|
||||
return len(f.store.emptyNodeByRoot)
|
||||
}
|
||||
|
||||
// Head returns the head root from fork choice store.
|
||||
@@ -64,14 +65,14 @@ func (f *ForkChoice) Head(
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
|
||||
}
|
||||
|
||||
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
|
||||
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
|
||||
}
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
|
||||
}
|
||||
return f.store.head(ctx)
|
||||
@@ -118,14 +119,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
|
||||
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
|
||||
_, remErr := f.store.removeNode(ctx, node)
|
||||
_, remErr := f.store.removeNode(ctx, pn)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("Could not remove node")
|
||||
}
|
||||
@@ -156,27 +157,32 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
|
||||
// HasNode returns true if the node exists in fork choice store,
|
||||
// false otherwise.
|
||||
func (f *ForkChoice) HasNode(root [32]byte) bool {
|
||||
_, ok := f.store.nodeByRoot[root]
|
||||
_, ok := f.store.emptyNodeByRoot[root]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsCanonical returns true if the given root is part of the canonical chain.
|
||||
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
//It is fine to pick empty node here since we only check if the beacon block is canonical.
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if node.bestDescendant == nil {
|
||||
if pn.node.bestDescendant == nil {
|
||||
// The node doesn't have any children
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
return node == f.store.headNode
|
||||
// headNode is itself head.
|
||||
return pn.node == f.store.headNode
|
||||
}
|
||||
return node == f.store.headNode.bestDescendant
|
||||
// headNode is not actualized and there are some descendants
|
||||
return pn.node == f.store.headNode.bestDescendant
|
||||
}
|
||||
// The node has children
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
return node.bestDescendant == f.store.headNode
|
||||
return pn.node.bestDescendant == f.store.headNode
|
||||
}
|
||||
return node.bestDescendant == f.store.headNode.bestDescendant
|
||||
return pn.node.bestDescendant == f.store.headNode.bestDescendant
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the given root has been optimistically synced.
|
||||
@@ -185,7 +191,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
node, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return true, ErrNilNode
|
||||
}
|
||||
@@ -198,17 +204,21 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
}
|
||||
|
||||
n := node
|
||||
for n != nil && n.slot > slot {
|
||||
n := pn.node
|
||||
for n.slot > slot {
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
n = n.parent
|
||||
if n.parent == nil {
|
||||
n = nil
|
||||
break
|
||||
}
|
||||
n = n.parent.node
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
@@ -221,10 +231,11 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
|
||||
// known chain in forkchoice.
|
||||
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
node, ok := f.store.nodeByRoot[cp.Root]
|
||||
if !ok || node == nil {
|
||||
pn, ok := f.store.emptyNodeByRoot[cp.Root]
|
||||
if !ok || pn == nil {
|
||||
return false, nil
|
||||
}
|
||||
node := pn.node
|
||||
epochStart, err := slots.EpochStart(cp.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -233,10 +244,13 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if len(node.children) == 0 {
|
||||
// If it's the start of the epoch, it is a checkpoint
|
||||
if node.slot == epochStart {
|
||||
return true, nil
|
||||
}
|
||||
if node.slot == epochStart {
|
||||
// If there are no descendants of this beacon block, it is viable as a checkpoint
|
||||
children := f.store.allConsensusChildren(node)
|
||||
if len(children) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
if !features.Get().IgnoreUnviableAttestations {
|
||||
@@ -246,7 +260,8 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
for _, child := range node.children {
|
||||
// If some child is after the start of the epoch, the checkpoint is viable.
|
||||
for _, child := range children {
|
||||
if child.slot > epochStart {
|
||||
return true, nil
|
||||
}
|
||||
@@ -287,7 +302,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
|
||||
// Ignore the vote if the root is not in fork choice
|
||||
// store, that means we have not seen the block before.
|
||||
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
|
||||
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
|
||||
if ok && vote.nextRoot != zHash {
|
||||
// Protection against nil node
|
||||
if nextNode == nil {
|
||||
@@ -296,7 +311,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
nextNode.balance += newBalance
|
||||
}
|
||||
|
||||
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
|
||||
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
|
||||
if ok && vote.currentRoot != zHash {
|
||||
// Protection against nil node
|
||||
if currentNode == nil {
|
||||
@@ -337,13 +352,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
|
||||
return f.store.proposerBoost()
|
||||
}
|
||||
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
|
||||
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
fn, ok := f.store.fullNodeByRoot[root]
|
||||
if !ok || fn == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set node to valid")
|
||||
}
|
||||
return node.setNodeAndParentValidated(ctx)
|
||||
return f.store.setNodeAndParentValidated(ctx, fn)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpoint of fork choice store.
|
||||
@@ -362,8 +377,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
|
||||
}
|
||||
|
||||
// InsertSlashedIndex adds the given slashed validator index to the
|
||||
@@ -386,7 +401,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
|
||||
return
|
||||
}
|
||||
|
||||
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
|
||||
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
|
||||
if !ok || node == nil {
|
||||
return
|
||||
}
|
||||
@@ -421,22 +436,28 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
|
||||
}
|
||||
|
||||
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
|
||||
// This is payload aware. Consider the following situation
|
||||
// [A,full] <--- [B, full] <---[C,pending]
|
||||
//
|
||||
// \---------[B, empty] <--[D, pending]
|
||||
//
|
||||
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
|
||||
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
n1, ok := f.store.nodeByRoot[r1]
|
||||
if !ok || n1 == nil {
|
||||
en1, ok := f.store.emptyNodeByRoot[r1]
|
||||
if !ok || en1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
// Do nothing if the input roots are the same.
|
||||
if r1 == r2 {
|
||||
return r1, n1.slot, nil
|
||||
return r1, en1.node.slot, nil
|
||||
}
|
||||
|
||||
n2, ok := f.store.nodeByRoot[r2]
|
||||
if !ok || n2 == nil {
|
||||
en2, ok := f.store.emptyNodeByRoot[r2]
|
||||
if !ok || en2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
@@ -444,23 +465,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, 0, ctx.Err()
|
||||
}
|
||||
if n1.slot > n2.slot {
|
||||
n1 = n1.parent
|
||||
if en1.node.slot > en2.node.slot {
|
||||
en1 = en1.node.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
// This should not happen at runtime as the finalized
|
||||
// node has to be a common ancestor
|
||||
if n1 == nil {
|
||||
if en1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
} else {
|
||||
n2 = n2.parent
|
||||
en2 = en2.node.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
if n2 == nil {
|
||||
if en2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
}
|
||||
if n1 == n2 {
|
||||
return n1.root, n1.slot, nil
|
||||
if en1 == en2 {
|
||||
return en1.node.root, en1.node.slot, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -507,35 +528,17 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
|
||||
|
||||
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
|
||||
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
|
||||
root := f.FinalizedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
|
||||
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
root := f.JustifiedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
|
||||
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
root := f.store.unrealizedJustifiedCheckpoint.Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
|
||||
}
|
||||
|
||||
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
@@ -559,7 +562,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
|
||||
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
|
||||
var err error
|
||||
if f.store.treeRootNode != nil {
|
||||
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
|
||||
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -588,7 +591,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
|
||||
|
||||
// Weight returns the weight of the given root if found on the store
|
||||
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
@@ -616,11 +619,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
|
||||
|
||||
// Slot returns the slot of the given root if it's known to forkchoice
|
||||
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
return n.slot, nil
|
||||
return n.node.slot, nil
|
||||
}
|
||||
|
||||
// DependentRoot returns the last root of the epoch prior to the requested epoch in the canonical chain.
|
||||
@@ -628,7 +631,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
|
||||
}
|
||||
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested epoch for the given root.
|
||||
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
tr, err := f.TargetRootForEpoch(root, epoch)
|
||||
if err != nil {
|
||||
@@ -637,18 +640,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
if tr == [32]byte{} {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
node, ok := f.store.nodeByRoot[tr]
|
||||
if !ok || node == nil {
|
||||
en, ok := f.store.emptyNodeByRoot[tr]
|
||||
if !ok || en == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
if slots.ToEpoch(node.slot) >= epoch {
|
||||
if node.parent != nil {
|
||||
node = node.parent
|
||||
if slots.ToEpoch(en.node.slot) >= epoch {
|
||||
if en.node.parent != nil {
|
||||
en = en.node.parent
|
||||
} else {
|
||||
return f.store.finalizedDependentRoot, nil
|
||||
}
|
||||
}
|
||||
return node.root, nil
|
||||
return en.node.root, nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch returns the root of the target block for a given epoch.
|
||||
@@ -660,46 +663,48 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
// which case we return the root of the checkpoint of the chain containing the
|
||||
// passed root, at the given epoch
|
||||
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
nodeEpoch := slots.ToEpoch(n.slot)
|
||||
node := n.node
|
||||
nodeEpoch := slots.ToEpoch(node.slot)
|
||||
if epoch > nodeEpoch {
|
||||
return n.root, nil
|
||||
return node.root, nil
|
||||
}
|
||||
if n.target == nil {
|
||||
if node.target == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
targetRoot := n.target.root
|
||||
targetRoot := node.target.root
|
||||
if epoch == nodeEpoch {
|
||||
return targetRoot, nil
|
||||
}
|
||||
targetNode, ok := f.store.nodeByRoot[targetRoot]
|
||||
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
|
||||
if !ok || targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// If slot 0 was not missed we consider a previous block to go back at least one epoch
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
|
||||
targetNode = targetNode.parent
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
|
||||
targetNode = targetNode.node.parent
|
||||
if targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
}
|
||||
return f.TargetRootForEpoch(targetNode.root, epoch)
|
||||
return f.TargetRootForEpoch(targetNode.node.root, epoch)
|
||||
}
|
||||
|
||||
// ParentRoot returns the block root of the parent node if it is in forkchoice.
|
||||
// The exception is for the finalized checkpoint root which we return the zero
|
||||
// hash.
|
||||
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// Return the zero hash for the tree root
|
||||
if n.parent == nil {
|
||||
parent := n.node.parent
|
||||
if parent == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
return n.parent.root, nil
|
||||
return parent.node.root, nil
|
||||
}
|
||||
|
||||
@@ -104,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
s := f.store
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
@@ -122,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{100, 100, 100}
|
||||
f.votes = []Vote{
|
||||
@@ -135,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
@@ -153,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{125, 125, 125}
|
||||
f.votes = []Vote{
|
||||
@@ -166,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
@@ -224,10 +224,10 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
|
||||
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
|
||||
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].weight)
|
||||
|
||||
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
|
||||
@@ -260,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
|
||||
@@ -342,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
|
||||
// Process b's slashing, c is now head
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
// Process b's slashing again, should be a noop
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
@@ -591,7 +591,7 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
optimistic: true,
|
||||
}
|
||||
|
||||
f.store.nodeByRoot[[32]byte{'y'}] = n
|
||||
f.store.emptyNodeByRoot[[32]byte{'y'}] = n
|
||||
// broken link
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
@@ -742,7 +742,7 @@ func TestWeight(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
require.Equal(t, true, ok)
|
||||
n.weight = 10
|
||||
w, err := f.Weight(root)
|
||||
|
||||
300
beacon-chain/forkchoice/doubly-linked-tree/gloas.go
Normal file
@@ -0,0 +1,300 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// getNodeInformation extracts, from a Gloas block, the payload hash committed in its
// bid and the parent payload node (empty or full) that the block builds on.
func (s *Store) getNodeInformation(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, payloadHash *[32]byte) error {
|
||||
sb, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to wrap signed bid")
|
||||
}
|
||||
bid, err := wb.Bid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get bid from wrapped bid")
|
||||
}
|
||||
*payloadHash = bid.BlockHash()
|
||||
parentRoot := block.ParentRoot()
|
||||
*parent = s.emptyNodeByRoot[parentRoot]
|
||||
if *parent == nil {
|
||||
// This is the tree root node.
|
||||
return nil
|
||||
}
|
||||
if bid.ParentBlockHash() == (*parent).node.payloadHash {
|
||||
// block builds on full
|
||||
*parent = s.fullNodeByRoot[(*parent).node.payloadHash]
|
||||
}
|
||||
return nil
|
||||
}
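
The parent resolution above hinges on a single comparison: the block's consensus parent defaults to the parent's empty payload node, and is upgraded to the full node only when the bid's parent block hash equals that parent's payload hash. A minimal, hypothetical restatement of that rule (the helper name and its placement are assumptions, not part of this change):

// choosesFullParent isolates the rule used by getNodeInformation above: the child
// builds on the full version of its parent only when the bid commits to the
// parent's executed payload. Illustrative helper only.
func choosesFullParent(bidParentHash, parentPayloadHash [32]byte) bool {
	return bidParentHash == parentPayloadHash
}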
|
||||
|
||||
// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight := en.weight
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += fn.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
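
The two functions above are mutually recursive: a consensus node's weight is its own balance plus the weights of its empty and (if present) full payload nodes, while a payload node's weight is its balance plus the weights of the consensus blocks built directly on it. A small worked example with invented balances:

// exampleWeightPropagation walks the recursion above with made-up numbers:
// block B (balance 5) is a child of block A (balance 3); B has an empty payload
// node with balance 1 and a full payload node with balance 2, and B was inserted
// under A's empty payload node. All values are illustrative.
func exampleWeightPropagation() uint64 {
	bEmptyWeight := uint64(1)                         // B's empty node: balance only, no consensus children
	bFullWeight := uint64(2)                          // B's full node: balance only, no consensus children
	bWeight := uint64(5) + bEmptyWeight + bFullWeight // consensus node B: 5 + 1 + 2 = 8

	aEmptyWeight := uint64(0) + bWeight // A's empty node: balance 0 plus child B
	return uint64(3) + aEmptyWeight     // consensus node A: 3 + 0 + 8 = 11
}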
|
||||
|
||||
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
|
||||
func (s *Store) allConsensusChildren(n *Node) []*Node {
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
fn, ok := s.fullNodeByRoot[n.root]
|
||||
if ok {
|
||||
return append(en.children, fn.children...)
|
||||
}
|
||||
return en.children
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if !pn.optimistic {
|
||||
return nil
|
||||
}
|
||||
pn.optimistic = false
|
||||
if pn.full {
|
||||
// set the empty node as valid too
|
||||
en := s.emptyNodeByRoot[pn.node.root]
|
||||
en.optimistic = false
|
||||
}
|
||||
if pn.node.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return s.setNodeAndParentValidated(ctx, pn.node.parent)
|
||||
}
|
||||
|
||||
// fullAncestor returns the highest ancestor with a full payload for the block with the
// given root. If a full node exists for that root it is returned directly; otherwise
// the latest full ancestor of the given root is returned.
|
||||
func (s *Store) fullAncestor(root [32]byte) *PayloadNode {
|
||||
fn, ok := s.fullNodeByRoot[root]
|
||||
if ok {
|
||||
return fn
|
||||
}
|
||||
en := s.emptyNodeByRoot[root]
|
||||
if en == nil {
|
||||
return nil
|
||||
}
|
||||
return s.fullParent(en)
|
||||
}
|
||||
|
||||
// fullParent returns the latest full node that this block builds on.
|
||||
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
|
||||
parent := pn.node.parent
|
||||
for ; parent != nil && !parent.full; parent = parent.node.parent {
|
||||
}
|
||||
return parent
|
||||
}
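
fullAncestor and fullParent walk the parent links backwards until a node whose payload was actually executed is found. A hedged sketch of the walk over a hand-built three-node chain (the wiring bypasses the normal insert path and is purely illustrative):

// exampleFullParent: grandparent (full) <- parent (empty) <- child (empty).
// fullParent skips the empty parent and lands on the full grandparent.
func exampleFullParent(s *Store) [32]byte {
	grand := &PayloadNode{node: &Node{payloadHash: [32]byte{'g'}}, full: true}
	parent := &PayloadNode{node: &Node{parent: grand}}
	child := &PayloadNode{node: &Node{parent: parent}}
	return s.fullParent(child).node.payloadHash // the grandparent's hash
}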
|
||||
|
||||
// parentHash returns the payload hash of the latest full node that this block builds on.
|
||||
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
|
||||
fullParent := s.fullParent(pn)
|
||||
if fullParent == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return fullParent.node.payloadHash
|
||||
}
|
||||
|
||||
// latestHashForRoot returns the latest payload hash for the given block root.
|
||||
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
|
||||
// try to get the full node first
|
||||
fn, ok := s.fullNodeByRoot[root]
|
||||
if ok && fn != nil {
|
||||
return fn.node.payloadHash
|
||||
}
|
||||
en := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return s.parentHash(en)
|
||||
}
|
||||
|
||||
// updateBestDescendantPayloadNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && bestChild == nil {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestChild == nil {
|
||||
n.bestDescendant = nil
|
||||
} else {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestDescendantConsensusNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(s.allConsensusChildren(n)) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn == nil {
|
||||
n.bestDescendant = en.bestDescendant
|
||||
return nil
|
||||
}
|
||||
// TODO GLOAS: pick between full or empty
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
n.bestDescendant = fn.bestDescendant
|
||||
return nil
|
||||
}
|
||||
|
||||
// choosePayloadContent chooses between the empty and full payload nodes for the passed consensus node. TODO Gloas: use PTC to choose.
|
||||
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
return fn
|
||||
}
|
||||
return s.emptyNodeByRoot[n.root]
|
||||
}
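
choosePayloadContent prefers the full node when one exists, and the reorg logic later in this change (ShouldOverrideFCU, GetProposerHead) uses it to obtain the payload view of the consensus head before running timing checks. A sketch of that usage pattern (the helper itself is illustrative, not part of the change):

// headPayloadArrivedEarly resolves the payload view of the current head and asks
// whether it was inserted before the attestation deadline.
func headPayloadArrivedEarly(f *ForkChoice) (bool, error) {
	consensusHead := f.store.headNode
	if consensusHead == nil {
		return false, ErrNilNode
	}
	head := f.store.choosePayloadContent(consensusHead)
	return head.arrivedEarly(f.store.genesisTime)
}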
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.node.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
en := s.emptyNodeByRoot[n.root]
// Default to the empty node's status so the tree root (nil parent) is handled safely.
optimistic := en.optimistic
timestamp := en.timestamp
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
optimistic = fn.optimistic
|
||||
timestamp = fn.timestamp
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
Timestamp: timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
children := s.allConsensusChildren(n)
|
||||
for _, child := range children {
|
||||
nodes, err = s.nodeTreeDump(ctx, child, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
@@ -1,95 +1,17 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessAttestationsThreshold is the amount of time after which we
|
||||
// process attestations for the current slot
|
||||
const ProcessAttestationsThreshold = 10 * time.Second
|
||||
|
||||
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := child.applyWeightChanges(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children.
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
hasViableDescendant := false
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
hasViableDescendant = true
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasViableDescendant {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
} else {
|
||||
n.bestDescendant = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// viableForHead returns true if the node is viable to head.
|
||||
// Any node with different finalized or justified epoch than
|
||||
// the ones in fork choice store should not be viable to head.
|
||||
@@ -110,30 +32,13 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if !n.optimistic {
|
||||
return nil
|
||||
}
|
||||
n.optimistic = false
|
||||
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return n.parent.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
// arrivedEarly returns whether this node was inserted before the first
|
||||
// threshold to orphan a block.
|
||||
// Note that genesisTime has seconds granularity, therefore we use a strict
|
||||
// inequality < here. For example a block that arrives 3.9999 seconds into the
|
||||
// slot will have secs = 3 below.
|
||||
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
|
||||
return sss < votingWindow, err
|
||||
}
|
||||
@@ -143,52 +48,7 @@ func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
// Note that genesisTime has seconds granularity, therefore we use an
|
||||
// inequality >= here. For example a block that arrives 10.00001 seconds into the
|
||||
// slot will have secs = 10 below.
|
||||
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
return secs >= ProcessAttestationsThreshold, err
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: n.optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
Timestamp: n.timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if n.optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
for _, child := range n.children {
|
||||
nodes, err = child.nodeTreeDump(ctx, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 400
|
||||
s.nodeByRoot[indexToHash(2)].weight = 400
|
||||
s.nodeByRoot[indexToHash(3)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(3)].weight = 400
|
||||
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
@@ -108,8 +108,8 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
s.nodeByRoot[indexToHash(2)].weight = 200
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
@@ -128,8 +128,8 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
s.nodeByRoot[indexToHash(2)].weight = 100
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 200
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 100
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
@@ -176,9 +176,9 @@ func TestNode_LeadsToViableHead(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
|
||||
}
|
||||
|
||||
func TestNode_SetFullyValidated(t *testing.T) {
|
||||
@@ -195,25 +195,25 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
|
||||
storeNodes[1] = f.store.emptyNodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
|
||||
storeNodes[2] = f.store.emptyNodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
|
||||
storeNodes[3] = f.store.emptyNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
|
||||
storeNodes[4] = f.store.emptyNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
|
||||
storeNodes[5] = f.store.emptyNodeByRoot[blk.Root()]
|
||||
|
||||
opt, err := f.IsOptimistic(indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
@@ -223,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
|
||||
require.NoError(t, f.store.emptyNodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
|
||||
|
||||
// block 5 should still be optimistic
|
||||
opt, err = f.IsOptimistic(indexToHash(5))
|
||||
|
||||
@@ -7,92 +7,130 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok {
|
||||
node, ok = s.nodeByRoot[parentRoot]
|
||||
if !ok || node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
n := s.fullNodeByRoot[root]
|
||||
if n == nil {
|
||||
// The offending node with its payload is not in forkchoice. Try with the parent
|
||||
n = s.emptyNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
|
||||
}
|
||||
// return early if the parent is LVH
|
||||
if node.payloadHash == lastValidHash {
|
||||
if n.node.payloadHash == lastValidHash {
|
||||
// The parent node must have been full and with a valid payload
|
||||
return invalidRoots, nil
|
||||
}
|
||||
} else {
|
||||
if node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
}
|
||||
if node.parent.root != parentRoot {
|
||||
return invalidRoots, errInvalidParentRoot
|
||||
if n.node.payloadHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
n = s.fullNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
|
||||
}
|
||||
} else {
|
||||
// The parent is empty and we don't yet know if it's valid or not
|
||||
for n = n.node.parent; n != nil; n = n.node.parent {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
if n.node.payloadHash == lastValidHash {
|
||||
// The node built on empty and the whole chain was valid
|
||||
return invalidRoots, nil
|
||||
}
|
||||
if n.node.payloadHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
break
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
|
||||
}
|
||||
}
|
||||
}
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
|
||||
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
|
||||
fp := s.fullParent(n)
|
||||
for ; fp != nil && fp.node.payloadHash != lastValidHash; fp = s.fullParent(fp) {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
n = fp
|
||||
}
|
||||
// Deal with the case that the last valid payload is in a different fork
|
||||
// This means we are dealing with an EE that does not follow the spec
|
||||
if firstInvalid.parent == nil {
|
||||
if fp == nil {
|
||||
// return early if the invalid node was not imported
|
||||
if node.root == parentRoot {
|
||||
if n.node.root == parentRoot {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
firstInvalid = node
|
||||
}
|
||||
return s.removeNode(ctx, firstInvalid)
|
||||
return s.removeNode(ctx, n)
|
||||
}
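
The invalidation walk above climbs fullParent links from a full node known to carry an invalid payload until the ancestor's payload hash equals the last valid hash reported by the engine. An illustrative restatement of just that loop (the different-fork edge case where the walk runs out of ancestors is omitted here; the helper is not part of this change):

// firstInvalidNode returns the highest ancestor of n whose own full parent carries
// lastValidHash; that node is the first one that must be removed.
func firstInvalidNode(s *Store, n *PayloadNode, lastValidHash [32]byte) *PayloadNode {
	for fp := s.fullParent(n); fp != nil && fp.node.payloadHash != lastValidHash; fp = s.fullParent(fp) {
		n = fp
	}
	return n
}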
|
||||
|
||||
// removeNode removes the node with the given root and all of its children
|
||||
// from the Fork Choice Store.
|
||||
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
|
||||
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
|
||||
if node == nil {
|
||||
if pn == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
|
||||
}
|
||||
if !node.optimistic || node.parent == nil {
|
||||
if !pn.optimistic || pn.node.parent == nil {
|
||||
return invalidRoots, errInvalidOptimisticStatus
|
||||
}
|
||||
|
||||
children := node.parent.children
|
||||
children := pn.node.parent.children
|
||||
if len(children) == 1 {
|
||||
node.parent.children = []*Node{}
|
||||
pn.node.parent.children = []*Node{}
|
||||
} else {
|
||||
for i, n := range children {
|
||||
if n == node {
|
||||
if n == pn.node {
|
||||
if i != len(children)-1 {
|
||||
children[i] = children[len(children)-1]
|
||||
}
|
||||
node.parent.children = children[:len(children)-1]
|
||||
pn.node.parent.children = children[:len(children)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return s.removeNodeAndChildren(ctx, node, invalidRoots)
|
||||
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
|
||||
}
|
||||
|
||||
// removeNodeAndChildren removes `node` and all of its descendant from the Store
|
||||
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
|
||||
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
|
||||
var err error
|
||||
for _, child := range node.children {
|
||||
// If we are removing an empty node, then remove the full node as well if it exists.
|
||||
if !pn.full {
|
||||
fn, ok := s.fullNodeByRoot[pn.node.root]
|
||||
if ok {
|
||||
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
|
||||
if err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// Now we remove the full node's children.
|
||||
for _, child := range pn.children {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
|
||||
// We need to remove only the empty node here since the recursion will take care of the full one.
|
||||
en := s.emptyNodeByRoot[child.root]
|
||||
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
invalidRoots = append(invalidRoots, node.root)
|
||||
if node.root == s.proposerBoostRoot {
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
// Only append the root for the empty nodes.
|
||||
if pn.full {
|
||||
delete(s.fullNodeByRoot, pn.node.root)
|
||||
} else {
|
||||
invalidRoots = append(invalidRoots, pn.node.root)
|
||||
if pn.node.root == s.proposerBoostRoot {
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
}
|
||||
if pn.node.root == s.previousProposerBoostRoot {
|
||||
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.emptyNodeByRoot, pn.node.root)
|
||||
}
|
||||
if node.root == s.previousProposerBoostRoot {
|
||||
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.nodeByRoot, node.root)
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
@@ -274,7 +274,7 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
|
||||
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
require.Equal(t, 2, len(f.store.emptyNodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
s := f.store
|
||||
proposerScore := uint64(0)
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
|
||||
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
} else {
|
||||
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
}
|
||||
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
} else {
|
||||
|
||||
@@ -166,13 +166,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// (1: 48) -> (2: 38) -> (3: 10)
|
||||
// \--------------->(4: 18)
|
||||
//
|
||||
node1 := f.store.nodeByRoot[indexToHash(1)]
|
||||
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.weight, uint64(48))
|
||||
node2 := f.store.nodeByRoot[indexToHash(2)]
|
||||
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.weight, uint64(38))
|
||||
node3 := f.store.nodeByRoot[indexToHash(3)]
|
||||
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.weight, uint64(10))
|
||||
node4 := f.store.nodeByRoot[indexToHash(4)]
|
||||
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.weight, uint64(18))
|
||||
|
||||
// Regression: process attestations for C, check that it
|
||||
|
||||
@@ -34,22 +34,23 @@ const orphanLateBlockProposingEarly = 2
|
||||
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
override = false
|
||||
|
||||
// We only need to override FCU if our current head is from the current
|
||||
// We only need to override FCU if our current consensusHead is from the current
|
||||
// slot. This differs from the spec implementation in that we assume
|
||||
// that we will call this function in the previous slot to proposing.
|
||||
head := f.store.headNode
|
||||
if head == nil {
|
||||
consensusHead := f.store.headNode
|
||||
if consensusHead == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
return
|
||||
}
|
||||
|
||||
// Do not reorg on epoch boundaries
|
||||
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return
|
||||
}
|
||||
head := f.store.choosePayloadContent(consensusHead)
|
||||
// Only reorg blocks that arrive late
|
||||
early, err := head.arrivedEarly(f.store.genesisTime)
|
||||
if err != nil {
|
||||
@@ -61,15 +62,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
}
|
||||
// Only reorg if we have been finalizing
|
||||
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
|
||||
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return
|
||||
}
|
||||
// Only orphan a single block
|
||||
parent := head.parent
|
||||
parent := consensusHead.parent
|
||||
if parent == nil {
|
||||
return
|
||||
}
|
||||
if head.slot > parent.slot+1 {
|
||||
if consensusHead.slot > parent.node.slot+1 {
|
||||
return
|
||||
}
|
||||
// Do not orphan a block that has higher justification than the parent
|
||||
@@ -78,12 +79,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
// }
|
||||
|
||||
// Only orphan a block if the head LMD vote is weak
|
||||
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return
|
||||
}
|
||||
|
||||
// Return early if we are checking before 10 seconds into the slot
|
||||
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
|
||||
sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check current slot")
|
||||
return true
|
||||
@@ -106,60 +107,61 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
// This function needs to be called only when proposing a block and all
|
||||
// attestation processing has already happened.
|
||||
func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
head := f.store.headNode
|
||||
if head == nil {
|
||||
consensusHead := f.store.headNode
|
||||
if consensusHead == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
// Only reorg blocks from the previous slot.
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
if head.slot+1 != currentSlot {
|
||||
return head.root
|
||||
if consensusHead.slot+1 != currentSlot {
|
||||
return consensusHead.root
|
||||
}
|
||||
// Do not reorg on epoch boundaries
|
||||
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return head.root
|
||||
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return consensusHead.root
|
||||
}
|
||||
// Only reorg blocks that arrive late
|
||||
head := f.store.choosePayloadContent(consensusHead)
|
||||
early, err := head.arrivedEarly(f.store.genesisTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if block arrived early")
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
if early {
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
// Only reorg if we have been finalizing
|
||||
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
|
||||
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return head.root
|
||||
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return consensusHead.root
|
||||
}
|
||||
// Only orphan a single block
|
||||
parent := head.parent
|
||||
parent := consensusHead.parent
|
||||
if parent == nil {
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
if head.slot > parent.slot+1 {
|
||||
return head.root
|
||||
if consensusHead.slot > parent.node.slot+1 {
|
||||
return consensusHead.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the head LMD vote is weak
|
||||
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return head.root
|
||||
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return consensusHead.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
|
||||
// Only reorg if we are proposing early
|
||||
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if proposing early")
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
if sss >= orphanLateBlockProposingEarly*time.Second {
|
||||
return head.root
|
||||
return consensusHead.root
|
||||
}
|
||||
return parent.root
|
||||
return parent.node.root
|
||||
}
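
GetProposerHead only orphans the head when its LMD vote is weak and the parent's vote is strong, both measured as percentages of the per-slot committee weight. A small arithmetic restatement of the head-side check (the numbers and the 20% threshold are assumptions for illustration, not values read from config):

// isHeadWeak: the head is weak when its weight is at most thresholdPercent percent
// of the committee weight. With committeeWeight = 32, thresholdPercent = 20 and
// headWeight = 5, the head is weak because 5*100 = 500 <= 32*20 = 640.
func isHeadWeak(headWeight, committeeWeight, thresholdPercent uint64) bool {
	return headWeight*100 <= committeeWeight*thresholdPercent
}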
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// head starts from justified root and then follows the best descendant links
|
||||
@@ -26,13 +27,16 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// JustifiedRoot has to be known
|
||||
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if !ok || justifiedNode == nil {
|
||||
var jn *Node
|
||||
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if ej != nil {
|
||||
jn = ej.node
|
||||
} else {
|
||||
// If the justifiedCheckpoint is from genesis, then the root is
|
||||
// zeroHash. In this case it should be the root of forkchoice
|
||||
// tree.
|
||||
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
justifiedNode = s.treeRootNode
|
||||
jn = s.treeRootNode
|
||||
} else {
|
||||
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
|
||||
}
|
||||
@@ -40,9 +44,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
// If the justified node doesn't have a best descendant,
|
||||
// the best node is itself.
|
||||
bestDescendant := justifiedNode.bestDescendant
|
||||
bestDescendant := jn.bestDescendant
|
||||
if bestDescendant == nil {
|
||||
bestDescendant = justifiedNode
|
||||
bestDescendant = jn
|
||||
}
|
||||
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
|
||||
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
|
||||
@@ -66,29 +70,42 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
// It then updates the new node's parent with the best child and descendant node.
|
||||
func (s *Store) insert(ctx context.Context,
|
||||
roblock consensus_blocks.ROBlock,
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch,
|
||||
) (*PayloadNode, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
root := roblock.Root()
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
parentRoot := block.ParentRoot()
|
||||
var payloadHash [32]byte
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(payloadHash[:], execution.BlockHash())
|
||||
}
|
||||
|
||||
// Return if the block has been inserted into Store before.
|
||||
if n, ok := s.nodeByRoot[root]; ok {
|
||||
if n, ok := s.emptyNodeByRoot[root]; ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
var parent *PayloadNode
|
||||
var payloadHash = &[32]byte{}
|
||||
if block.Version() >= version.Gloas {
|
||||
if err := s.getNodeInformation(block, &parent, payloadHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(payloadHash[:], execution.BlockHash())
|
||||
}
|
||||
parentRoot := block.ParentRoot()
|
||||
en := s.emptyNodeByRoot[parentRoot]
|
||||
parent = s.fullNodeByRoot[parentRoot]
|
||||
if parent == nil && en != nil {
|
||||
// pre-Gloas only full parents are allowed.
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
@@ -97,30 +114,47 @@ func (s *Store) insert(ctx context.Context,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
optimistic: true,
|
||||
payloadHash: payloadHash,
|
||||
timestamp: time.Now(),
|
||||
payloadHash: *payloadHash,
|
||||
}
|
||||
|
||||
// Set the node's target checkpoint
|
||||
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
n.target = n
|
||||
} else if parent != nil {
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
|
||||
n.target = parent.target
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
|
||||
n.target = parent.node.target
|
||||
} else {
|
||||
n.target = parent
|
||||
n.target = parent.node
|
||||
}
|
||||
}
|
||||
var ret *PayloadNode
|
||||
// Make the empty node. Its optimistic status equals its parent's status.
|
||||
pn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: n.parent.optimistic,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
s.emptyNodeByRoot[root] = pn
|
||||
ret = pn
|
||||
if block.Version() < version.Gloas {
|
||||
// Make also the full node, this is optimistic until the engine returns the execution payload validation.
|
||||
fn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: true,
|
||||
timestamp: time.Now(),
|
||||
full: true,
|
||||
}
|
||||
ret = fn
|
||||
s.fullNodeByRoot[root] = fn
|
||||
}
|
||||
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
if s.treeRootNode == nil {
|
||||
s.treeRootNode = n
|
||||
s.headNode = n
|
||||
s.highestReceivedNode = n
|
||||
} else {
|
||||
delete(s.nodeByRoot, root)
|
||||
delete(s.emptyNodeByRoot, root)
|
||||
delete(s.fullNodeByRoot, root)
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
@@ -128,7 +162,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Apply proposer boost
|
||||
now := time.Now()
|
||||
if now.Before(s.genesisTime) {
|
||||
return n, nil
|
||||
return ret, nil
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
|
||||
@@ -144,17 +178,16 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Update best descendants
|
||||
jEpoch := s.justifiedCheckpoint.Epoch
|
||||
fEpoch := s.finalizedCheckpoint.Epoch
|
||||
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
_, remErr := s.removeNode(ctx, n)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
}
|
||||
return nil, errors.Wrap(err, "could not update best descendants")
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": root,
|
||||
}).Error("Could not update best descendant")
|
||||
}
|
||||
}
|
||||
// Update metrics.
|
||||
processedBlockCount.Inc()
|
||||
nodeCount.Set(float64(len(s.nodeByRoot)))
|
||||
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
|
||||
|
||||
// Only update received block slot if it's within epoch from current time.
|
||||
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
|
||||
@@ -165,10 +198,10 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.highestReceivedNode = n
|
||||
}
|
||||
|
||||
return n, nil
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
|
||||
// pruneFinalizedNodeByRootMap prunes the `emptyNodeByRoot` and `fullNodeByRoot` maps
|
||||
// starting from `node` down to the finalized Node or to a leaf of the Fork
|
||||
// choice store.
|
||||
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
|
||||
@@ -181,44 +214,51 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, child := range node.children {
|
||||
for _, child := range s.allConsensusChildren(node) {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
node.children = nil
|
||||
delete(s.nodeByRoot, node.root)
|
||||
en := s.emptyNodeByRoot[node.root]
|
||||
en.children = nil
|
||||
delete(s.emptyNodeByRoot, node.root)
|
||||
fn := s.fullNodeByRoot[node.root]
|
||||
if fn != nil {
|
||||
fn.children = nil
|
||||
delete(s.fullNodeByRoot, node.root)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
|
||||
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
|
||||
// TODO: GLOAS, to ensure that chains up to a full node are found, we may want to consider pruning only up to the latest full block that was finalized
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
defer span.End()
|
||||
|
||||
finalizedRoot := s.finalizedCheckpoint.Root
|
||||
finalizedEpoch := s.finalizedCheckpoint.Epoch
|
||||
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
|
||||
if !ok || finalizedNode == nil {
|
||||
fen, ok := s.emptyNodeByRoot[finalizedRoot]
|
||||
if !ok || fen == nil {
|
||||
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
|
||||
}
|
||||
fn := fen.node
|
||||
// return early if we haven't changed the finalized checkpoint
|
||||
if finalizedNode.parent == nil {
|
||||
if fn.parent == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save the new finalized dependent root because it will be pruned
|
||||
s.finalizedDependentRoot = finalizedNode.parent.root
|
||||
s.finalizedDependentRoot = fn.parent.node.root
|
||||
|
||||
// Prune nodeByRoot starting from root
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
finalizedNode.parent = nil
|
||||
s.treeRootNode = finalizedNode
|
||||
fn.parent = nil
|
||||
s.treeRootNode = fn
|
||||
|
||||
prunedCount.Inc()
|
||||
// Prune all children of the finalized checkpoint block that are incompatible with it
|
||||
@@ -226,13 +266,13 @@ func (s *Store) prune(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute epoch start")
|
||||
}
|
||||
if finalizedNode.slot == checkpointMaxSlot {
|
||||
if fn.slot == checkpointMaxSlot {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, child := range finalizedNode.children {
|
||||
for _, child := range fen.children {
|
||||
if child != nil && child.slot <= checkpointMaxSlot {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
|
||||
return errors.Wrap(err, "could not prune incompatible finalized child")
|
||||
}
|
||||
}
|
||||
@@ -246,10 +286,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
|
||||
var roots [][32]byte
|
||||
var slots []primitives.Slot
|
||||
|
||||
for root, node := range s.nodeByRoot {
|
||||
if len(node.children) == 0 {
|
||||
for root, n := range s.emptyNodeByRoot {
|
||||
if len(s.allConsensusChildren(n.node)) == 0 {
|
||||
roots = append(roots, root)
|
||||
slots = append(slots, node.slot)
|
||||
slots = append(slots, n.node.slot)
|
||||
}
|
||||
}
|
||||
return roots, slots
|
||||
|
||||
@@ -52,7 +52,7 @@ func TestStore_NodeByRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
require.Equal(t, 3, f.NodeCount())
|
||||
for root, node := range f.store.nodeByRoot {
|
||||
for root, node := range f.store.emptyNodeByRoot {
|
||||
v, ok := expectedRoots[root]
|
||||
require.Equal(t, ok, true)
|
||||
require.Equal(t, v, node)
|
||||
@@ -130,14 +130,14 @@ func TestStore_Insert(t *testing.T) {
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
s := &Store{emptyNodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
payloadHash := [32]byte{'a'}
|
||||
ctx := t.Context()
|
||||
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
_, err = s.insert(ctx, blk, 1, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
|
||||
@@ -166,7 +166,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(99)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
@@ -188,12 +188,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
// Finalized root is at index 11 so everything before 11 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(10)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
|
||||
// One more time.
|
||||
s.finalizedCheckpoint.Root = indexToHash(20)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_ReturnEarly(t *testing.T) {
|
||||
@@ -236,7 +236,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -316,7 +316,7 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
}
|
||||
|
||||
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
|
||||
@@ -21,23 +21,26 @@ type ForkChoice struct {
|
||||
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
|
||||
}
|
||||
|
||||
var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
|
||||
|
||||
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
|
||||
type Store struct {
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
|
||||
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
|
||||
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
|
||||
fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by payload Hash
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
genesisTime time.Time
|
||||
highestReceivedNode *Node // The highest slot node.
|
||||
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
|
||||
@@ -50,18 +53,27 @@ type Node struct {
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *Node // parent index of this node.
|
||||
parent *PayloadNode // parent payload node of this node.
|
||||
target *Node // target checkpoint for the block converted to the node.
|
||||
children []*Node // the list of direct children of this Node
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
optimistic bool // whether the block has been fully validated or not
|
||||
timestamp time.Time // The timestamp when the node was inserted.
|
||||
}
|
||||
|
||||
// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty or full.
|
||||
type PayloadNode struct {
|
||||
optimistic bool // whether the block has been fully validated or not
|
||||
full bool // whether this node represents a payload present or not
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
bestDescendant *Node // bestDescendant node of this payload node.
|
||||
node *Node // the consensus part of this full forkchoice node
|
||||
timestamp time.Time // The timestamp when the node was inserted.
|
||||
children []*Node // the list of direct children of this Node
|
||||
}
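The split between consensus Nodes and PayloadNodes above means a single block root can now map to two entries: an empty PayloadNode in emptyNodeByRoot and, once the payload is revealed, a full PayloadNode in fullNodeByRoot. The helper below is a minimal sketch of that lookup, not part of this diff; it only assumes the Store fields defined above.

// payloadNode resolves a key to the empty or full variant of a Gloas node.
// Sketch only: fullNodeByRoot is keyed by payload hash per its comment above,
// so the caller is assumed to pass the appropriate key when wantFull is true.
func (s *Store) payloadNode(key [fieldparams.RootLength]byte, wantFull bool) (*PayloadNode, bool) {
	if wantFull {
		pn, ok := s.fullNodeByRoot[key]
		return pn, ok
	}
	pn, ok := s.emptyNodeByRoot[key]
	return pn, ok
}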
|
||||
|
||||
// Vote defines an individual validator's vote.
|
||||
|
||||
@@ -15,33 +15,34 @@ import (
|
||||
)
|
||||
|
||||
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
|
||||
}
|
||||
if epoch < node.unrealizedJustifiedEpoch {
|
||||
if epoch < en.node.unrealizedJustifiedEpoch {
|
||||
return errInvalidUnrealizedJustifiedEpoch
|
||||
}
|
||||
node.unrealizedJustifiedEpoch = epoch
|
||||
en.node.unrealizedJustifiedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
|
||||
}
|
||||
if epoch < node.unrealizedFinalizedEpoch {
|
||||
if epoch < en.node.unrealizedFinalizedEpoch {
|
||||
return errInvalidUnrealizedFinalizedEpoch
|
||||
}
|
||||
node.unrealizedFinalizedEpoch = epoch
|
||||
en.node.unrealizedFinalizedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
|
||||
// epochs stored within nodes. It should be called at the beginning of each epoch.
|
||||
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
|
||||
for _, node := range f.store.nodeByRoot {
|
||||
for _, en := range f.store.emptyNodeByRoot {
|
||||
node := en.node
|
||||
node.justifiedEpoch = node.unrealizedJustifiedEpoch
|
||||
node.finalizedEpoch = node.unrealizedFinalizedEpoch
|
||||
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
|
||||
@@ -62,16 +63,17 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
|
||||
if node.parent == nil { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
pn := node.parent.node
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
|
||||
// Exit early if it's justified or too early to be justified.
|
||||
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
|
||||
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
|
||||
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
|
||||
return jc, fc
|
||||
}
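For context on the guard above (not part of the diff): with mainnet's 32 slots per epoch, slots.SinceEpochStarts(stateSlot)*3 < SlotsPerEpoch*2 reduces to slotsIntoEpoch*3 < 64, which holds for slots 0 through 21 of the epoch, i.e. while fewer than two thirds of the epoch has elapsed.

// tooEarlyForCurrentEpoch is an illustrative restatement of the guard used in
// pullTips above; the function and its names are hypothetical.
func tooEarlyForCurrentEpoch(slotsIntoEpoch, slotsPerEpoch uint64) bool {
	// With slotsPerEpoch = 32: 21*3 = 63 < 64 (true), 22*3 = 66 >= 64 (false).
	return slotsIntoEpoch*3 < slotsPerEpoch*2
}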
|
||||
|
||||
|
||||
@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
|
||||
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
|
||||
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
|
||||
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
|
||||
@@ -78,7 +78,7 @@ func TestStore_LongFork(t *testing.T) {
|
||||
// Add an attestation to c, it is head
|
||||
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
|
||||
f.justifiedBalances = []uint64{100}
|
||||
c := f.store.nodeByRoot[[32]byte{'c'}]
|
||||
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
|
||||
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
|
||||
driftGenesisTime(f, c.slot, 0)
|
||||
headRoot, err := f.Head(ctx)
|
||||
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
|
||||
d := f.store.nodeByRoot[[32]byte{'d'}]
|
||||
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
|
||||
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
|
||||
driftGenesisTime(f, d.slot, 0)
|
||||
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
}
|
||||
|
||||
// Epoch 1 Epoch 2 Epoch 3
|
||||
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'d'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
|
||||
driftGenesisTime(f, 99, 0)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
|
||||
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'h'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
}
|
||||
|
||||
func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 66, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
|
||||
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
|
||||
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 127, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Block from previous Epoch", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 9 10
|
||||
f.store.finalizedCheckpoint.Root = indexToHash(5)
|
||||
require.NoError(t, f.store.prune(t.Context()))
|
||||
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
|
||||
assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
|
||||
// we artificially pruned the justified root.
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ type FastGetter interface {
|
||||
// Setter allows to set forkchoice information
|
||||
type Setter interface {
|
||||
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
|
||||
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
|
||||
SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
|
||||
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
|
||||
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
|
||||
SetGenesisTime(time.Time)
|
||||
|
||||
@@ -82,20 +82,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
|
||||
parentBlockRoot := h.ParentBlockRoot()
|
||||
blockHash := h.BlockHash()
|
||||
randao := h.PrevRandao()
|
||||
blobKzgCommitments := h.BlobKzgCommitments()
|
||||
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
|
||||
feeRecipient := h.FeeRecipient()
|
||||
b.latestExecutionPayloadBid = ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: parentBlockRoot[:],
|
||||
BlockHash: blockHash[:],
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: h.GasLimit(),
|
||||
BuilderIndex: h.BuilderIndex(),
|
||||
Slot: h.Slot(),
|
||||
Value: h.Value(),
|
||||
ExecutionPayment: h.ExecutionPayment(),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
FeeRecipient: feeRecipient[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: parentBlockRoot[:],
|
||||
BlockHash: blockHash[:],
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: h.GasLimit(),
|
||||
BuilderIndex: h.BuilderIndex(),
|
||||
Slot: h.Slot(),
|
||||
Value: h.Value(),
|
||||
ExecutionPayment: h.ExecutionPayment(),
|
||||
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
|
||||
FeeRecipient: feeRecipient[:],
|
||||
}
|
||||
b.markFieldAsDirty(types.LatestExecutionPayloadBid)
|
||||
|
||||
|
||||
@@ -14,17 +14,17 @@ import (
|
||||
)
|
||||
|
||||
type testExecutionPayloadBid struct {
|
||||
parentBlockHash [32]byte
|
||||
parentBlockRoot [32]byte
|
||||
blockHash [32]byte
|
||||
prevRandao [32]byte
|
||||
blobKzgCommitments [][]byte
|
||||
feeRecipient [20]byte
|
||||
gasLimit uint64
|
||||
builderIndex primitives.BuilderIndex
|
||||
slot primitives.Slot
|
||||
value primitives.Gwei
|
||||
executionPayment primitives.Gwei
|
||||
parentBlockHash [32]byte
|
||||
parentBlockRoot [32]byte
|
||||
blockHash [32]byte
|
||||
prevRandao [32]byte
|
||||
blobKzgCommitmentsRoot [32]byte
|
||||
feeRecipient [20]byte
|
||||
gasLimit uint64
|
||||
builderIndex primitives.BuilderIndex
|
||||
slot primitives.Slot
|
||||
value primitives.Gwei
|
||||
executionPayment primitives.Gwei
|
||||
}
|
||||
|
||||
func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
|
||||
@@ -40,12 +40,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
|
||||
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
|
||||
return t.executionPayment
|
||||
}
|
||||
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
|
||||
func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
|
||||
return uint64(len(t.blobKzgCommitments))
|
||||
}
|
||||
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
|
||||
func (t testExecutionPayloadBid) IsNil() bool { return false }
|
||||
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
|
||||
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
|
||||
func (t testExecutionPayloadBid) IsNil() bool { return false }
|
||||
|
||||
func TestSetExecutionPayloadBid(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
@@ -60,7 +57,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
|
||||
parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
|
||||
blockHash = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
|
||||
prevRandao = [32]byte(bytes.Repeat([]byte{0x11}, 32))
|
||||
blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
|
||||
blobRoot = [32]byte(bytes.Repeat([]byte{0x22}, 32))
|
||||
feeRecipient [20]byte
|
||||
)
|
||||
copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
|
||||
@@ -69,17 +66,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
bid := testExecutionPayloadBid{
|
||||
parentBlockHash: parentBlockHash,
|
||||
parentBlockRoot: parentBlockRoot,
|
||||
blockHash: blockHash,
|
||||
prevRandao: prevRandao,
|
||||
blobKzgCommitments: blobCommitments,
|
||||
feeRecipient: feeRecipient,
|
||||
gasLimit: 123,
|
||||
builderIndex: 7,
|
||||
slot: 9,
|
||||
value: 11,
|
||||
executionPayment: 22,
|
||||
parentBlockHash: parentBlockHash,
|
||||
parentBlockRoot: parentBlockRoot,
|
||||
blockHash: blockHash,
|
||||
prevRandao: prevRandao,
|
||||
blobKzgCommitmentsRoot: blobRoot,
|
||||
feeRecipient: feeRecipient,
|
||||
gasLimit: 123,
|
||||
builderIndex: 7,
|
||||
slot: 9,
|
||||
value: 11,
|
||||
executionPayment: 22,
|
||||
}
|
||||
|
||||
require.NoError(t, st.SetExecutionPayloadBid(bid))
|
||||
@@ -89,7 +86,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
|
||||
require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
|
||||
require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
|
||||
require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
|
||||
require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
|
||||
require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
|
||||
require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
|
||||
require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
|
||||
require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Moved blob KZG commitments into `ExecutionPayloadBid` and removed them from `ExecutionPayloadEnvelope` for Gloas.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- Add read only wrapper for execution payload envelope for gloas
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"execution.go",
|
||||
"execution_payload_envelope.go",
|
||||
"factory.go",
|
||||
"get_payload.go",
|
||||
"getters.go",
|
||||
@@ -46,7 +45,6 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"execution_payload_envelope_test.go",
|
||||
"execution_test.go",
|
||||
"factory_test.go",
|
||||
"getters_test.go",
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type signedExecutionPayloadEnvelope struct {
|
||||
s *ethpb.SignedExecutionPayloadEnvelope
|
||||
}
|
||||
|
||||
type executionPayloadEnvelope struct {
|
||||
p *ethpb.ExecutionPayloadEnvelope
|
||||
}
|
||||
|
||||
// WrappedROSignedExecutionPayloadEnvelope wraps a signed execution payload envelope proto in a read-only interface.
|
||||
func WrappedROSignedExecutionPayloadEnvelope(s *ethpb.SignedExecutionPayloadEnvelope) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
|
||||
w := signedExecutionPayloadEnvelope{s: s}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// WrappedROExecutionPayloadEnvelope wraps an execution payload envelope proto in a read-only interface.
|
||||
func WrappedROExecutionPayloadEnvelope(p *ethpb.ExecutionPayloadEnvelope) (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
w := &executionPayloadEnvelope{p: p}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Envelope returns the execution payload envelope as a read-only interface.
|
||||
func (s signedExecutionPayloadEnvelope) Envelope() (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
return WrappedROExecutionPayloadEnvelope(s.s.Message)
|
||||
}
|
||||
|
||||
// Signature returns the BLS signature as a 96-byte array.
|
||||
func (s signedExecutionPayloadEnvelope) Signature() [field_params.BLSSignatureLength]byte {
|
||||
return [field_params.BLSSignatureLength]byte(s.s.Signature)
|
||||
}
|
||||
|
||||
// IsNil reports whether the signed envelope or its contents are invalid.
|
||||
func (s signedExecutionPayloadEnvelope) IsNil() bool {
|
||||
if s.s == nil {
|
||||
return true
|
||||
}
|
||||
if len(s.s.Signature) != field_params.BLSSignatureLength {
|
||||
return true
|
||||
}
|
||||
w := executionPayloadEnvelope{p: s.s.Message}
|
||||
return w.IsNil()
|
||||
}
|
||||
|
||||
// SigningRoot computes the signing root for the signed envelope with the provided domain.
|
||||
func (s signedExecutionPayloadEnvelope) SigningRoot(domain []byte) (root [32]byte, err error) {
|
||||
return signing.ComputeSigningRoot(s.s.Message, domain)
|
||||
}
|
||||
|
||||
// Proto returns the underlying protobuf message.
|
||||
func (s signedExecutionPayloadEnvelope) Proto() proto.Message {
|
||||
return s.s
|
||||
}
|
||||
|
||||
// IsNil reports whether the envelope or its required fields are invalid.
|
||||
func (p *executionPayloadEnvelope) IsNil() bool {
|
||||
if p.p == nil {
|
||||
return true
|
||||
}
|
||||
if p.p.Payload == nil {
|
||||
return true
|
||||
}
|
||||
if len(p.p.BeaconBlockRoot) != field_params.RootLength {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsBlinded reports whether the envelope contains a blinded payload.
|
||||
func (p *executionPayloadEnvelope) IsBlinded() bool {
|
||||
return !p.IsNil() && p.p.Payload == nil
|
||||
}
|
||||
|
||||
// Execution returns the execution payload as a read-only interface.
|
||||
func (p *executionPayloadEnvelope) Execution() (interfaces.ExecutionData, error) {
|
||||
return WrappedExecutionPayloadDeneb(p.p.Payload)
|
||||
}
|
||||
|
||||
// ExecutionRequests returns the execution requests attached to the envelope.
|
||||
func (p *executionPayloadEnvelope) ExecutionRequests() *enginev1.ExecutionRequests {
|
||||
return p.p.ExecutionRequests
|
||||
}
|
||||
|
||||
// BuilderIndex returns the proposer/builder index for the envelope.
|
||||
func (p *executionPayloadEnvelope) BuilderIndex() primitives.BuilderIndex {
|
||||
return p.p.BuilderIndex
|
||||
}
|
||||
|
||||
// BeaconBlockRoot returns the beacon block root referenced by the envelope.
|
||||
func (p *executionPayloadEnvelope) BeaconBlockRoot() [field_params.RootLength]byte {
|
||||
return [field_params.RootLength]byte(p.p.BeaconBlockRoot)
|
||||
}
|
||||
|
||||
// Slot returns the slot of the envelope.
|
||||
func (p *executionPayloadEnvelope) Slot() primitives.Slot {
|
||||
return p.p.Slot
|
||||
}
|
||||
|
||||
// StateRoot returns the state root carried by the envelope.
|
||||
func (p *executionPayloadEnvelope) StateRoot() [field_params.RootLength]byte {
|
||||
return [field_params.RootLength]byte(p.p.StateRoot)
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func validExecutionPayloadEnvelope() *ethpb.ExecutionPayloadEnvelope {
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
|
||||
StateRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
|
||||
LogsBloom: bytes.Repeat([]byte{0x05}, 256),
|
||||
PrevRandao: bytes.Repeat([]byte{0x06}, 32),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: 4,
|
||||
BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x08}, 32),
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*enginev1.Withdrawal{},
|
||||
BlobGasUsed: 0,
|
||||
ExcessBlobGas: 0,
|
||||
}
|
||||
|
||||
return ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
BuilderIndex: 10,
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
Slot: 9,
|
||||
StateRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrappedROExecutionPayloadEnvelope(t *testing.T) {
|
||||
t.Run("returns error on nil payload", func(t *testing.T) {
|
||||
invalid := validExecutionPayloadEnvelope()
|
||||
invalid.Payload = nil
|
||||
_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("returns error on invalid beacon root length", func(t *testing.T) {
|
||||
invalid := validExecutionPayloadEnvelope()
|
||||
invalid.BeaconBlockRoot = []byte{0x01}
|
||||
_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("wraps and exposes fields", func(t *testing.T) {
|
||||
env := validExecutionPayloadEnvelope()
|
||||
wrapped, err := blocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, primitives.BuilderIndex(10), wrapped.BuilderIndex())
|
||||
require.Equal(t, primitives.Slot(9), wrapped.Slot())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), wrapped.BeaconBlockRoot())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xBB}, 32)), wrapped.StateRoot())
|
||||
|
||||
reqs := wrapped.ExecutionRequests()
|
||||
require.NotNil(t, reqs)
|
||||
|
||||
exec, err := wrapped.Execution()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, env.Payload.ParentHash, exec.ParentHash())
|
||||
|
||||
require.Equal(t, false, wrapped.IsBlinded())
|
||||
})
|
||||
}
|
||||
|
||||
func TestWrappedROSignedExecutionPayloadEnvelope(t *testing.T) {
|
||||
t.Run("returns error for invalid signature length", func(t *testing.T) {
|
||||
signed := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: validExecutionPayloadEnvelope(),
|
||||
Signature: bytes.Repeat([]byte{0xAA}, 95),
|
||||
}
|
||||
_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("returns error on nil envelope", func(t *testing.T) {
|
||||
_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(nil)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("wraps and provides envelope/signing data", func(t *testing.T) {
|
||||
sig := bytes.Repeat([]byte{0xAB}, 96)
|
||||
signed := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: validExecutionPayloadEnvelope(),
|
||||
Signature: sig,
|
||||
}
|
||||
|
||||
wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotSig := wrapped.Signature()
|
||||
assert.DeepEqual(t, [96]byte(sig), gotSig)
|
||||
|
||||
env, err := wrapped.Envelope()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), env.BeaconBlockRoot())
|
||||
|
||||
domain := bytes.Repeat([]byte{0xCC}, 32)
|
||||
wantRoot, err := signing.ComputeSigningRoot(signed.Message, domain)
|
||||
require.NoError(t, err)
|
||||
gotRoot, err := wrapped.SigningRoot(domain)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wantRoot, gotRoot)
|
||||
|
||||
require.Equal(t, signed, wrapped.Proto())
|
||||
})
|
||||
}
|
||||
@@ -671,7 +671,7 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: [][]byte{make([]byte, fieldparams.BLSPubkeyLength)},
|
||||
BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -44,16 +43,11 @@ func (h executionPayloadBidGloas) IsNil() bool {
|
||||
len(h.payload.ParentBlockRoot) != 32 ||
|
||||
len(h.payload.BlockHash) != 32 ||
|
||||
len(h.payload.PrevRandao) != 32 ||
|
||||
len(h.payload.BlobKzgCommitmentsRoot) != 32 ||
|
||||
len(h.payload.FeeRecipient) != 20 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, commitment := range h.payload.BlobKzgCommitments {
|
||||
if len(commitment) != 48 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -137,14 +131,9 @@ func (h executionPayloadBidGloas) ExecutionPayment() primitives.Gwei {
|
||||
return primitives.Gwei(h.payload.ExecutionPayment)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments returns the KZG commitments for blobs.
|
||||
func (h executionPayloadBidGloas) BlobKzgCommitments() [][]byte {
|
||||
return bytesutil.SafeCopy2dBytes(h.payload.BlobKzgCommitments)
|
||||
}
|
||||
|
||||
// BlobKzgCommitmentCount returns the number of blob KZG commitments.
|
||||
func (h executionPayloadBidGloas) BlobKzgCommitmentCount() uint64 {
|
||||
return uint64(len(h.payload.BlobKzgCommitments))
|
||||
// BlobKzgCommitmentsRoot returns the root of the KZG commitments for blobs.
|
||||
func (h executionPayloadBidGloas) BlobKzgCommitmentsRoot() [32]byte {
|
||||
return [32]byte(h.payload.BlobKzgCommitmentsRoot)
|
||||
}
|
||||
|
||||
// FeeRecipient returns the execution address that will receive the builder payment.
|
||||
|
||||
@@ -15,17 +15,17 @@ import (
|
||||
|
||||
func validExecutionPayloadBid() *ethpb.ExecutionPayloadBid {
|
||||
return ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
GasLimit: 123,
|
||||
BuilderIndex: 5,
|
||||
Slot: 6,
|
||||
Value: 7,
|
||||
ExecutionPayment: 8,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x05}, 48)},
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
GasLimit: 123,
|
||||
BuilderIndex: 5,
|
||||
Slot: 6,
|
||||
Value: 7,
|
||||
ExecutionPayment: 8,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,8 +52,8 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
|
||||
mutate: func(b *ethpb.ExecutionPayloadBid) { b.PrevRandao = []byte{0x04} },
|
||||
},
|
||||
{
|
||||
name: "blob kzg commitments length",
|
||||
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitments = [][]byte{[]byte{0x05}} },
|
||||
name: "blob kzg commitments root",
|
||||
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitmentsRoot = []byte{0x05} },
|
||||
},
|
||||
{
|
||||
name: "fee recipient",
|
||||
@@ -85,8 +85,7 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x02}, 32)), wrapped.ParentBlockRoot())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x03}, 32)), wrapped.BlockHash())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x04}, 32)), wrapped.PrevRandao())
|
||||
assert.DeepEqual(t, [][]byte{bytes.Repeat([]byte{0x05}, 48)}, wrapped.BlobKzgCommitments())
|
||||
require.Equal(t, uint64(1), wrapped.BlobKzgCommitmentCount())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x05}, 32)), wrapped.BlobKzgCommitmentsRoot())
|
||||
assert.DeepEqual(t, [20]byte(bytes.Repeat([]byte{0x06}, 20)), wrapped.FeeRecipient())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"beacon_block.go",
|
||||
"error.go",
|
||||
"execution_payload_envelope.go",
|
||||
"light_client.go",
|
||||
"signed_execution_payload_bid.go",
|
||||
"utils.go",
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
package interfaces
|
||||
|
||||
import (
|
||||
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type ROSignedExecutionPayloadEnvelope interface {
|
||||
Envelope() (ROExecutionPayloadEnvelope, error)
|
||||
Signature() [field_params.BLSSignatureLength]byte
|
||||
SigningRoot([]byte) ([32]byte, error)
|
||||
IsNil() bool
|
||||
Proto() proto.Message
|
||||
}
|
||||
|
||||
type ROExecutionPayloadEnvelope interface {
|
||||
Execution() (ExecutionData, error)
|
||||
ExecutionRequests() *enginev1.ExecutionRequests
|
||||
BuilderIndex() primitives.BuilderIndex
|
||||
BeaconBlockRoot() [field_params.RootLength]byte
|
||||
Slot() primitives.Slot
|
||||
StateRoot() [field_params.RootLength]byte
|
||||
IsBlinded() bool
|
||||
IsNil() bool
|
||||
}
|
||||
@@ -22,8 +22,7 @@ type ROExecutionPayloadBid interface {
|
||||
Slot() primitives.Slot
|
||||
Value() primitives.Gwei
|
||||
ExecutionPayment() primitives.Gwei
|
||||
BlobKzgCommitments() [][]byte
|
||||
BlobKzgCommitmentCount() uint64
|
||||
BlobKzgCommitmentsRoot() [32]byte
|
||||
FeeRecipient() [20]byte
|
||||
IsNil() bool
|
||||
}
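Because the interface now exposes only the 32-byte SSZ root, consumers that previously iterated BlobKzgCommitments can only compare roots. A hypothetical consumer-side check, not part of this diff:

// bidCommitmentsMatch compares the bid's commitments root against an expected
// root computed elsewhere (e.g. from a locally held blob bundle). Sketch only.
func bidCommitmentsMatch(bid ROExecutionPayloadBid, expectedRoot [32]byte) bool {
	return bid.BlobKzgCommitmentsRoot() == expectedRoot
}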
|
||||
|
||||
proto/engine/v1/gloas.pb.go (generated, executable file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.3
|
||||
// protoc v3.21.7
|
||||
// source: proto/engine/v1/gloas.proto
|
||||
|
||||
package enginev1
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
github_com_OffchainLabs_prysm_v6_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ExecutionPayloadEnvelope struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Payload *ExecutionPayloadDeneb `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
ExecutionRequests *ExecutionRequests `protobuf:"bytes,2,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
|
||||
BuilderIndex github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex `protobuf:"varint,3,opt,name=builder_index,json=builderIndex,proto3" json:"builder_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ValidatorIndex"`
|
||||
BeaconBlockRoot []byte `protobuf:"bytes,4,opt,name=beacon_block_root,json=beaconBlockRoot,proto3" json:"beacon_block_root,omitempty" ssz-size:"32"`
|
||||
Slot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
|
||||
BlobKzgCommitments [][]byte `protobuf:"bytes,6,rep,name=blob_kzg_commitments,json=blobKzgCommitments,proto3" json:"blob_kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
|
||||
StateRoot []byte `protobuf:"bytes,7,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) Reset() {
|
||||
*x = ExecutionPayloadEnvelope{}
|
||||
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExecutionPayloadEnvelope) ProtoMessage() {}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
|
||||
func (*ExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
|
||||
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetPayload() *ExecutionPayloadDeneb {
|
||||
if x != nil {
|
||||
return x.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetExecutionRequests() *ExecutionRequests {
|
||||
if x != nil {
|
||||
return x.ExecutionRequests
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetBuilderIndex() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex {
|
||||
if x != nil {
|
||||
return x.BuilderIndex
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetBeaconBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BeaconBlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.Slot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetBlobKzgCommitments() [][]byte {
|
||||
if x != nil {
|
||||
return x.BlobKzgCommitments
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionPayloadEnvelope) GetStateRoot() []byte {
|
||||
if x != nil {
|
||||
return x.StateRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SignedExecutionPayloadEnvelope struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Message *ExecutionPayloadEnvelope `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SignedExecutionPayloadEnvelope) Reset() {
|
||||
*x = SignedExecutionPayloadEnvelope{}
|
||||
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SignedExecutionPayloadEnvelope) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SignedExecutionPayloadEnvelope) ProtoMessage() {}
|
||||
|
||||
func (x *SignedExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SignedExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
|
||||
func (*SignedExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
|
||||
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *SignedExecutionPayloadEnvelope) GetMessage() *ExecutionPayloadEnvelope {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SignedExecutionPayloadEnvelope) GetSignature() []byte {
|
||||
if x != nil {
|
||||
return x.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_proto_engine_v1_gloas_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_engine_v1_gloas_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x67, 0x6c, 0x6f, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67,
|
||||
0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6c, 0x65, 0x63, 0x74,
|
||||
0x72, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x04, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f,
|
||||
0x70, 0x65, 0x12, 0x43, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07,
|
||||
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63,
|
||||
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x73, 0x0a,
|
||||
0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
|
||||
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49,
|
||||
0x6e, 0x64, 0x65, 0x78, 0x52, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x64,
|
||||
0x65, 0x78, 0x12, 0x32, 0x0a, 0x11, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a,
|
||||
0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05,
|
||||
0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
|
||||
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74,
|
||||
0x12, 0x42, 0x0a, 0x14, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d,
|
||||
0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10,
|
||||
0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36,
|
||||
0x52, 0x12, 0x62, 0x6c, 0x6f, 0x62, 0x4b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d,
|
||||
0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f,
|
||||
0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32,
|
||||
0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1e,
|
||||
0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50,
|
||||
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x46,
|
||||
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61,
|
||||
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x07, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
|
||||
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39,
|
||||
0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3b, 0x5a, 0x39,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
|
||||
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31,
|
||||
0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_proto_engine_v1_gloas_proto_rawDescOnce sync.Once
|
||||
file_proto_engine_v1_gloas_proto_rawDescData = file_proto_engine_v1_gloas_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_proto_engine_v1_gloas_proto_rawDescGZIP() []byte {
|
||||
file_proto_engine_v1_gloas_proto_rawDescOnce.Do(func() {
|
||||
file_proto_engine_v1_gloas_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_engine_v1_gloas_proto_rawDescData)
|
||||
})
|
||||
return file_proto_engine_v1_gloas_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_engine_v1_gloas_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_proto_engine_v1_gloas_proto_goTypes = []any{
|
||||
(*ExecutionPayloadEnvelope)(nil), // 0: ethereum.engine.v1.ExecutionPayloadEnvelope
|
||||
(*SignedExecutionPayloadEnvelope)(nil), // 1: ethereum.engine.v1.SignedExecutionPayloadEnvelope
|
||||
(*ExecutionPayloadDeneb)(nil), // 2: ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
(*ExecutionRequests)(nil), // 3: ethereum.engine.v1.ExecutionRequests
|
||||
}
|
||||
var file_proto_engine_v1_gloas_proto_depIdxs = []int32{
|
||||
2, // 0: ethereum.engine.v1.ExecutionPayloadEnvelope.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
3, // 1: ethereum.engine.v1.ExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
|
||||
0, // 2: ethereum.engine.v1.SignedExecutionPayloadEnvelope.message:type_name -> ethereum.engine.v1.ExecutionPayloadEnvelope
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_engine_v1_gloas_proto_init() }
|
||||
func file_proto_engine_v1_gloas_proto_init() {
|
||||
if File_proto_engine_v1_gloas_proto != nil {
|
||||
return
|
||||
}
|
||||
file_proto_engine_v1_execution_engine_proto_init()
|
||||
file_proto_engine_v1_electra_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_engine_v1_gloas_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_proto_engine_v1_gloas_proto_goTypes,
|
||||
DependencyIndexes: file_proto_engine_v1_gloas_proto_depIdxs,
|
||||
MessageInfos: file_proto_engine_v1_gloas_proto_msgTypes,
|
||||
}.Build()
|
||||
File_proto_engine_v1_gloas_proto = out.File
|
||||
file_proto_engine_v1_gloas_proto_rawDesc = nil
|
||||
file_proto_engine_v1_gloas_proto_goTypes = nil
|
||||
file_proto_engine_v1_gloas_proto_depIdxs = nil
|
||||
}
|
||||
@@ -144,17 +144,15 @@ func copySignedExecutionPayloadBid(header *SignedExecutionPayloadBid) *SignedExe
}
if header.Message != nil {
copied.Message = &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.Message.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
ExecutionPayment: header.Message.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.Message.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.Message.BlobKzgCommitmentsRoot),
}
}
return copied

@@ -1215,16 +1215,15 @@ func genSignedExecutionPayloadBidGloas() *v1alpha1.SignedExecutionPayloadBid {

func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
return &v1alpha1.ExecutionPayloadBid{
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
ExecutionPayment: primitives.Gwei(rand.Uint64()),
BlobKzgCommitments: [][]byte{bytes(48)},
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
BlobKzgCommitmentsRoot: bytes(32),
}
}

@@ -10,17 +10,17 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
return nil
}
return &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
}
}
1089 proto/prysm/v1alpha1/gloas.pb.go (generated)
File diff suppressed because it is too large
@@ -33,7 +33,7 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
// slot: Slot
// value: Gwei
// execution_payment: Gwei
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// blob_kzg_commitments_root: Root
message ExecutionPayloadBid {
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];

@@ -56,10 +56,7 @@ message ExecutionPayloadBid {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
repeated bytes blob_kzg_commitments = 11 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
}

// SignedExecutionPayloadBid wraps an execution payload bid with a signature.

@@ -369,6 +366,7 @@ message BuilderPendingWithdrawal {
// class DataColumnSidecar(Container):
// index: ColumnIndex
// column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_commitents: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// slot: Slot
// beacon_block_root: Root

@@ -378,6 +376,10 @@ message DataColumnSidecarGloas {
(ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_commitments = 3 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_proofs = 4 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"

@@ -400,6 +402,7 @@ message DataColumnSidecarGloas {
// builder_index: BuilderIndex
// beacon_block_root: Root
// slot: Slot
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// state_root: Root
message ExecutionPayloadEnvelope {
ethereum.engine.v1.ExecutionPayloadDeneb payload = 1;

@@ -412,7 +415,11 @@ message ExecutionPayloadEnvelope {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
bytes state_root = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
repeated bytes blob_kzg_commitments = 6 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes state_root = 7 [ (ethereum.eth.ext.ssz_size) = "32" ];
}

// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a signature.
@@ -15,7 +15,6 @@ func (e *ExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadBid object to a target array
func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(192)

// Field (0) 'ParentBlockHash'
if size := len(e.ParentBlockHash); size != 32 {

@@ -67,22 +66,12 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
// Field (9) 'ExecutionPayment'
dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))

// Offset (10) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48

// Field (10) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}
dst = append(dst, e.BlobKzgCommitmentsRoot...)

return
}

@@ -91,13 +80,10 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 192 {
if size != 220 {
return ssz.ErrSize
}

tail := buf
var o10 uint64

// Field (0) 'ParentBlockHash'
if cap(e.ParentBlockHash) == 0 {
e.ParentBlockHash = make([]byte, 0, len(buf[0:32]))

@@ -143,40 +129,18 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
// Field (9) 'ExecutionPayment'
e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))

// Offset (10) 'BlobKzgCommitments'
if o10 = ssz.ReadOffset(buf[188:192]); o10 > size {
return ssz.ErrOffset
// Field (10) 'BlobKzgCommitmentsRoot'
if cap(e.BlobKzgCommitmentsRoot) == 0 {
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
}
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)

if o10 != 192 {
return ssz.ErrInvalidVariableOffset
}

// Field (10) 'BlobKzgCommitments'
{
buf = tail[o10:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
size = 192

// Field (10) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48

size = 220
return
}

@@ -239,24 +203,12 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (9) 'ExecutionPayment'
hh.PutUint64(uint64(e.ExecutionPayment))

// Field (10) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}

numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
hh.PutBytes(e.BlobKzgCommitmentsRoot)

hh.Merkleize(indx)
return

@@ -270,14 +222,14 @@ func (s *SignedExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the SignedExecutionPayloadBid object to a target array
func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(100)

// Offset (0) 'Message'
dst = ssz.WriteOffset(dst, offset)
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
offset += s.Message.SizeSSZ()
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}

// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {

@@ -286,11 +238,6 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
}
dst = append(dst, s.Signature...)

// Field (0) 'Message'
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}

return
}

@@ -298,51 +245,30 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 100 {
if size != 316 {
return ssz.ErrSize
}

tail := buf
var o0 uint64

// Offset (0) 'Message'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}

if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}

// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[4:100]))
}
s.Signature = append(s.Signature, buf[4:100]...)

// Field (0) 'Message'
{
buf = tail[o0:]
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
if err = s.Message.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 100

// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
size += s.Message.SizeSSZ()
if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
return err
}

// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[220:316]))
}
s.Signature = append(s.Signature, buf[220:316]...)

return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 316
return
}
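For reference, the new fixed-size encodings above can be re-derived from the field widths shown in the SSZ code. This is a hedged sketch, not part of the diff; the field list is taken from the generated marshaller above.

package main

import "fmt"

// Sketch: re-derives the fixed SSZ sizes used in the diff above.
func main() {
	// parent_block_hash, parent_block_root, block_hash, prev_randao: 32 bytes each;
	// fee_recipient: 20; gas_limit, builder_index, slot, value, execution_payment: 8 each;
	// blob_kzg_commitments_root: 32 (replacing the old variable-length commitment list).
	bid := 4*32 + 20 + 5*8 + 32
	signedBid := bid + 96 // bid plus the 96-byte BLS signature
	fmt.Println(bid, signedBid) // 220 316
}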
@@ -816,7 +742,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(392)
offset := int(704)

// Field (0) 'RandaoReveal'
if size := len(b.RandaoReveal); size != 96 {

@@ -878,12 +804,13 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
dst = ssz.WriteOffset(dst, offset)
offset += len(b.BlsToExecutionChanges) * 172

// Offset (10) 'SignedExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
offset += b.SignedExecutionPayloadBid.SizeSSZ()
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}

// Offset (11) 'PayloadAttestations'
dst = ssz.WriteOffset(dst, offset)

@@ -969,11 +896,6 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
}
}

// Field (10) 'SignedExecutionPayloadBid'
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}

// Field (11) 'PayloadAttestations'
if size := len(b.PayloadAttestations); size > 4 {
err = ssz.ErrListTooBigFn("--.PayloadAttestations", size, 4)

@@ -992,12 +914,12 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 392 {
if size < 704 {
return ssz.ErrSize
}

tail := buf
var o3, o4, o5, o6, o7, o9, o10, o11 uint64
var o3, o4, o5, o6, o7, o9, o11 uint64

// Field (0) 'RandaoReveal'
if cap(b.RandaoReveal) == 0 {

@@ -1024,7 +946,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o3 != 392 {
if o3 != 704 {
return ssz.ErrInvalidVariableOffset
}

@@ -1061,13 +983,16 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

// Offset (10) 'SignedExecutionPayloadBid'
if o10 = ssz.ReadOffset(buf[384:388]); o10 > size || o9 > o10 {
return ssz.ErrOffset
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
return err
}

// Offset (11) 'PayloadAttestations'
if o11 = ssz.ReadOffset(buf[388:392]); o11 > size || o10 > o11 {
if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
return ssz.ErrOffset
}

@@ -1171,7 +1096,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {

// Field (9) 'BlsToExecutionChanges'
{
buf = tail[o9:o10]
buf = tail[o9:o11]
num, err := ssz.DivideInt2(len(buf), 172, 16)
if err != nil {
return err

@@ -1187,17 +1112,6 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
}
}

// Field (10) 'SignedExecutionPayloadBid'
{
buf = tail[o10:o11]
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}

// Field (11) 'PayloadAttestations'
{
buf = tail[o11:]

@@ -1220,7 +1134,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
size = 392
size = 704

// Field (3) 'ProposerSlashings'
size += len(b.ProposerSlashings) * 416

@@ -1246,12 +1160,6 @@ func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
// Field (9) 'BlsToExecutionChanges'
size += len(b.BlsToExecutionChanges) * 172

// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
size += b.SignedExecutionPayloadBid.SizeSSZ()

// Field (11) 'PayloadAttestations'
size += len(b.PayloadAttestations) * 202
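Because the signed bid is now a fixed 316-byte field rather than a variable-size one, the fixed part of BeaconBlockBodyGloas grows from 392 to 704 bytes: it loses one 4-byte offset and gains the inline 316-byte encoding. A small sanity check of that arithmetic (assumed, not taken from the generator):

package main

import "fmt"

func main() {
	oldFixed := 392
	newFixed := oldFixed - 4 + 316 // drop the offset, inline the 316-byte signed bid
	fmt.Println(newFixed)          // 704
}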
@@ -1529,7 +1437,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(2741117)
offset := int(2741333)

// Field (0) 'GenesisTime'
dst = ssz.MarshalUint64(dst, b.GenesisTime)

@@ -1694,12 +1602,13 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
return
}

// Offset (24) 'LatestExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
offset += b.LatestExecutionPayloadBid.SizeSSZ()
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}

// Field (25) 'NextWithdrawalIndex'
dst = ssz.MarshalUint64(dst, b.NextWithdrawalIndex)

@@ -1857,11 +1766,6 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = ssz.MarshalUint64(dst, b.InactivityScores[ii])
}

// Field (24) 'LatestExecutionPayloadBid'
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}

// Field (27) 'HistoricalSummaries'
if size := len(b.HistoricalSummaries); size > 16777216 {
err = ssz.ErrListTooBigFn("--.HistoricalSummaries", size, 16777216)

@@ -1946,12 +1850,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 2741117 {
if size < 2741333 {
return ssz.ErrSize
}

tail := buf
var o7, o9, o11, o12, o15, o16, o21, o24, o27, o34, o35, o36, o38, o42, o44 uint64
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64

// Field (0) 'GenesisTime'
b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])

@@ -2004,7 +1908,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o7 != 2741117 {
if o7 != 2741333 {
return ssz.ErrInvalidVariableOffset
}

@@ -2110,74 +2014,77 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return err
}

// Offset (24) 'LatestExecutionPayloadBid'
if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 {
return ssz.ErrOffset
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
return err
}

// Field (25) 'NextWithdrawalIndex'
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736633:2736641])
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])

// Field (26) 'NextWithdrawalValidatorIndex'
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736641:2736649]))
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))

// Offset (27) 'HistoricalSummaries'
if o27 = ssz.ReadOffset(buf[2736649:2736653]); o27 > size || o24 > o27 {
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
return ssz.ErrOffset
}

// Field (28) 'DepositRequestsStartIndex'
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736653:2736661])
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])

// Field (29) 'DepositBalanceToConsume'
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736661:2736669]))
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))

// Field (30) 'ExitBalanceToConsume'
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736669:2736677]))
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))

// Field (31) 'EarliestExitEpoch'
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736677:2736685]))
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))

// Field (32) 'ConsolidationBalanceToConsume'
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736685:2736693]))
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))

// Field (33) 'EarliestConsolidationEpoch'
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736693:2736701]))
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))

// Offset (34) 'PendingDeposits'
if o34 = ssz.ReadOffset(buf[2736701:2736705]); o34 > size || o27 > o34 {
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
return ssz.ErrOffset
}

// Offset (35) 'PendingPartialWithdrawals'
if o35 = ssz.ReadOffset(buf[2736705:2736709]); o35 > size || o34 > o35 {
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
return ssz.ErrOffset
}

// Offset (36) 'PendingConsolidations'
if o36 = ssz.ReadOffset(buf[2736709:2736713]); o36 > size || o35 > o36 {
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
return ssz.ErrOffset
}

// Field (37) 'ProposerLookahead'
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
for ii := 0; ii < 64; ii++ {
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736713:2737225][ii*8 : (ii+1)*8])
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
}

// Offset (38) 'Builders'
if o38 = ssz.ReadOffset(buf[2737225:2737229]); o38 > size || o36 > o38 {
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
return ssz.ErrOffset
}

// Field (39) 'NextWithdrawalBuilderIndex'
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737229:2737237]))
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))

// Field (40) 'ExecutionPayloadAvailability'
if cap(b.ExecutionPayloadAvailability) == 0 {
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737237:2738261]))
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
}
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737237:2738261]...)
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)

// Field (41) 'BuilderPendingPayments'
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)

@@ -2185,24 +2092,24 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.BuilderPendingPayments[ii] == nil {
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
}
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738261:2741077][ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
return err
}
}

// Offset (42) 'BuilderPendingWithdrawals'
if o42 = ssz.ReadOffset(buf[2741077:2741081]); o42 > size || o38 > o42 {
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
return ssz.ErrOffset
}

// Field (43) 'LatestBlockHash'
if cap(b.LatestBlockHash) == 0 {
b.LatestBlockHash = make([]byte, 0, len(buf[2741081:2741113]))
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
}
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741081:2741113]...)
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)

// Offset (44) 'PayloadExpectedWithdrawals'
if o44 = ssz.ReadOffset(buf[2741113:2741117]); o44 > size || o42 > o44 {
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
return ssz.ErrOffset
}

@@ -2297,7 +2204,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {

// Field (21) 'InactivityScores'
{
buf = tail[o21:o24]
buf = tail[o21:o27]
num, err := ssz.DivideInt2(len(buf), 8, 1099511627776)
if err != nil {
return err

@@ -2308,17 +2215,6 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
}
}

// Field (24) 'LatestExecutionPayloadBid'
{
buf = tail[o24:o27]
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}

// Field (27) 'HistoricalSummaries'
{
buf = tail[o27:o34]

@@ -2449,7 +2345,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
func (b *BeaconStateGloas) SizeSSZ() (size int) {
size = 2741117
size = 2741333

// Field (7) 'HistoricalRoots'
size += len(b.HistoricalRoots) * 32

@@ -2472,12 +2368,6 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
// Field (21) 'InactivityScores'
size += len(b.InactivityScores) * 8

// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
size += b.LatestExecutionPayloadBid.SizeSSZ()

// Field (27) 'HistoricalSummaries'
size += len(b.HistoricalSummaries) * 64
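The same substitution explains the BeaconStateGloas constants: the 4-byte offset for latest_execution_payload_bid is replaced by the inline 220-byte bid, so the fixed part grows by 216 bytes and every buffer offset after field 24 shifts by the same amount. A rough check (assumed arithmetic, not taken from the generator):

package main

import "fmt"

func main() {
	oldFixed := 2741117
	newFixed := oldFixed - 4 + 220 // replace the bid offset with the inline 220-byte bid
	fmt.Println(newFixed, newFixed-oldFixed) // 2741333 216
}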
@@ -3091,7 +2981,7 @@ func (d *DataColumnSidecarGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the DataColumnSidecarGloas object to a target array
func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(56)
offset := int(60)

// Field (0) 'Index'
dst = ssz.MarshalUint64(dst, d.Index)

@@ -3100,14 +2990,18 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Column) * 2048

// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgCommitments) * 48

// Offset (3) 'KzgProofs'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgProofs) * 48

// Field (3) 'Slot'
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(d.Slot))

// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return

@@ -3127,7 +3021,20 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = append(dst, d.Column[ii]...)
}

// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(d.KzgCommitments); ii++ {
if size := len(d.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, d.KzgCommitments[ii]...)
}

// Field (3) 'KzgProofs'
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return

@@ -3147,12 +3054,12 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 56 {
if size < 60 {
return ssz.ErrSize
}

tail := buf
var o1, o2 uint64
var o1, o2, o3 uint64

// Field (0) 'Index'
d.Index = ssz.UnmarshallUint64(buf[0:8])

@@ -3162,23 +3069,28 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o1 != 56 {
if o1 != 60 {
return ssz.ErrInvalidVariableOffset
}

// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}

// Field (3) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[16:24]))

// Field (4) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[24:56]))
// Offset (3) 'KzgProofs'
if o3 = ssz.ReadOffset(buf[16:20]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[24:56]...)

// Field (4) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[20:28]))

// Field (5) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[28:60]))
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[28:60]...)

// Field (1) 'Column'
{

@@ -3196,9 +3108,25 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
}
}

// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
buf = tail[o2:]
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgCommitments[ii]) == 0 {
d.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgCommitments[ii] = append(d.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}

// Field (3) 'KzgProofs'
{
buf = tail[o3:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err

@@ -3216,12 +3144,15 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecarGloas object
func (d *DataColumnSidecarGloas) SizeSSZ() (size int) {
size = 56
size = 60

// Field (1) 'Column'
size += len(d.Column) * 2048

// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
size += len(d.KzgCommitments) * 48

// Field (3) 'KzgProofs'
size += len(d.KzgProofs) * 48

return

@@ -3258,7 +3189,26 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}

numItems := uint64(len(d.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (3) 'KzgProofs'
{
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)

@@ -3277,10 +3227,10 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (3) 'Slot'
// Field (4) 'Slot'
hh.PutUint64(uint64(d.Slot))

// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3299,7 +3249,7 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadEnvelope object to a target array
func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(88)
offset := int(92)

// Offset (0) 'Payload'
dst = ssz.WriteOffset(dst, offset)

@@ -3328,7 +3278,11 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))

// Field (5) 'StateRoot'
// Offset (5) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48

// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return

@@ -3345,6 +3299,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
return
}

// Field (5) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}

return
}

@@ -3352,19 +3319,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 88 {
if size < 92 {
return ssz.ErrSize
}

tail := buf
var o0, o1 uint64
var o0, o1, o5 uint64

// Offset (0) 'Payload'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}

if o0 != 88 {
if o0 != 92 {
return ssz.ErrInvalidVariableOffset
}

@@ -3385,11 +3352,16 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (4) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[48:56]))

// Field (5) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[56:88]))
// Offset (5) 'BlobKzgCommitments'
if o5 = ssz.ReadOffset(buf[56:60]); o5 > size || o1 > o5 {
return ssz.ErrOffset
}
e.StateRoot = append(e.StateRoot, buf[56:88]...)

// Field (6) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[60:92]))
}
e.StateRoot = append(e.StateRoot, buf[60:92]...)

// Field (0) 'Payload'
{

@@ -3404,7 +3376,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {

// Field (1) 'ExecutionRequests'
{
buf = tail[o1:]
buf = tail[o1:o5]
if e.ExecutionRequests == nil {
e.ExecutionRequests = new(v1.ExecutionRequests)
}

@@ -3412,12 +3384,28 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
return err
}
}

// Field (5) 'BlobKzgCommitments'
{
buf = tail[o5:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadEnvelope object
func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 88
size = 92

// Field (0) 'Payload'
if e.Payload == nil {

@@ -3431,6 +3419,9 @@ func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
}
size += e.ExecutionRequests.SizeSSZ()

// Field (5) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48

return
}
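Both DataColumnSidecarGloas and ExecutionPayloadEnvelope gain a variable-length commitments list in this change, so each fixed part grows by exactly one 4-byte offset (56 to 60 and 88 to 92). A minimal sketch of that accounting, under those assumptions:

package main

import "fmt"

func main() {
	sidecar := 56 + 4  // new kzg_commitments offset in DataColumnSidecarGloas
	envelope := 88 + 4 // new blob_kzg_commitments offset in ExecutionPayloadEnvelope
	fmt.Println(sidecar, envelope) // 60 92
}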
@@ -3466,7 +3457,26 @@ func (e *ExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (4) 'Slot'
hh.PutUint64(uint64(e.Slot))

// Field (5) 'StateRoot'
// Field (5) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}

numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
@@ -23,17 +23,17 @@ func TestExecutionPayloadBid_Copy(t *testing.T) {
{
name: "fully populated bid",
bid: &ExecutionPayloadBid{
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
Value: 1000000000000000000,
ExecutionPayment: 5645654,
BlobKzgCommitments: [][]byte{[]byte("blob_kzg_commitments_48_bytes_longer_than_needed")},
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitmentsRoot: []byte("blob_kzg_commitments_32_bytes!!"),
},
},
}
@@ -512,8 +512,8 @@
|
||||
- name: MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec config_var="MIN_BUILDER_WITHDRAWABILITY_DELAY" fork="gloas" hash="be7f8473">
|
||||
MIN_BUILDER_WITHDRAWABILITY_DELAY: uint64 = 64
|
||||
<spec config_var="MIN_BUILDER_WITHDRAWABILITY_DELAY" fork="gloas" hash="d378428f">
|
||||
MIN_BUILDER_WITHDRAWABILITY_DELAY: uint64 = 4096
|
||||
</spec>
|
||||
|
||||
- name: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS#deneb
|
||||
|
||||
@@ -738,12 +738,11 @@
|
||||
- name: DataColumnSidecar#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec ssz_object="DataColumnSidecar" fork="gloas" hash="332c7cfc">
|
||||
<spec ssz_object="DataColumnSidecar" fork="gloas" hash="8028928b">
|
||||
class DataColumnSidecar(Container):
|
||||
index: ColumnIndex
|
||||
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
# [Modified in Gloas:EIP7732]
|
||||
# Removed `kzg_commitments`
|
||||
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
# [Modified in Gloas:EIP7732]
|
||||
# Removed `signed_block_header`
|
||||
@@ -917,7 +916,7 @@
|
||||
- name: ExecutionPayloadBid#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec ssz_object="ExecutionPayloadBid" fork="gloas" hash="1a7b9dea">
|
||||
<spec ssz_object="ExecutionPayloadBid" fork="gloas" hash="aa71ba16">
|
||||
class ExecutionPayloadBid(Container):
|
||||
parent_block_hash: Hash32
|
||||
parent_block_root: Root
|
||||
@@ -929,19 +928,20 @@
|
||||
slot: Slot
|
||||
value: Gwei
|
||||
execution_payment: Gwei
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
blob_kzg_commitments_root: Root
|
||||
</spec>
|
||||
|
||||
- name: ExecutionPayloadEnvelope#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec ssz_object="ExecutionPayloadEnvelope" fork="gloas" hash="ec5c0233">
|
||||
<spec ssz_object="ExecutionPayloadEnvelope" fork="gloas" hash="cd522f7f">
|
||||
class ExecutionPayloadEnvelope(Container):
|
||||
payload: ExecutionPayload
|
||||
execution_requests: ExecutionRequests
|
||||
builder_index: BuilderIndex
|
||||
beacon_block_root: Root
|
||||
slot: Slot
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
state_root: Root
|
||||
</spec>
|
||||
|
||||
|
||||
@@ -1,26 +1,13 @@
|
||||
- name: add_builder_to_registry#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="add_builder_to_registry" fork="gloas" hash="cd0414c9">
|
||||
<spec fn="add_builder_to_registry" fork="gloas" hash="938224ec">
|
||||
def add_builder_to_registry(
|
||||
state: BeaconState,
|
||||
pubkey: BLSPubkey,
|
||||
withdrawal_credentials: Bytes32,
|
||||
amount: uint64,
|
||||
slot: Slot,
|
||||
state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
|
||||
) -> None:
|
||||
set_or_append_list(
|
||||
state.builders,
|
||||
get_index_for_new_builder(state),
|
||||
Builder(
|
||||
pubkey=pubkey,
|
||||
version=uint8(withdrawal_credentials[0]),
|
||||
execution_address=ExecutionAddress(withdrawal_credentials[12:]),
|
||||
balance=amount,
|
||||
deposit_epoch=compute_epoch_at_slot(slot),
|
||||
withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
),
|
||||
)
|
||||
index = get_index_for_new_builder(state)
|
||||
builder = get_builder_from_deposit(state, pubkey, withdrawal_credentials, amount)
|
||||
set_or_append_list(state.builders, index, builder)
|
||||
</spec>
|
||||
|
||||
- name: add_flag#altair
|
||||
@@ -158,20 +145,19 @@
|
||||
- name: apply_deposit_for_builder#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="apply_deposit_for_builder" fork="gloas" hash="e4bc98c7">
|
||||
<spec fn="apply_deposit_for_builder" fork="gloas" hash="eae84bc2">
|
||||
def apply_deposit_for_builder(
|
||||
state: BeaconState,
|
||||
pubkey: BLSPubkey,
|
||||
withdrawal_credentials: Bytes32,
|
||||
amount: uint64,
|
||||
signature: BLSSignature,
|
||||
slot: Slot,
|
||||
) -> None:
|
||||
builder_pubkeys = [b.pubkey for b in state.builders]
|
||||
if pubkey not in builder_pubkeys:
|
||||
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
|
||||
add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
|
||||
add_builder_to_registry(state, pubkey, withdrawal_credentials, amount)
|
||||
else:
|
||||
# Increase balance by deposit amount
|
||||
builder_index = builder_pubkeys.index(pubkey)
|
||||
@@ -565,11 +551,9 @@
|
||||
- file: beacon-chain/core/signing/signing_root.go
|
||||
search: func ComputeDomain(
|
||||
spec: |
|
||||
<spec fn="compute_domain" fork="phase0" hash="a78b32e4">
|
||||
<spec fn="compute_domain" fork="phase0" hash="948e1334">
|
||||
def compute_domain(
|
||||
domain_type: DomainType,
|
||||
fork_version: Optional[Version] = None,
|
||||
genesis_validators_root: Optional[Root] = None,
|
||||
domain_type: DomainType, fork_version: Version = None, genesis_validators_root: Root = None
|
||||
) -> Domain:
|
||||
"""
|
||||
Return the domain for the ``domain_type`` and ``fork_version``.
|
||||
@@ -2328,6 +2312,23 @@
|
||||
return bls.Sign(privkey, signing_root)
|
||||
</spec>
|
||||
|
||||
- name: get_builder_from_deposit#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_builder_from_deposit" fork="gloas" hash="7f914af6">
|
||||
def get_builder_from_deposit(
|
||||
state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
|
||||
) -> Builder:
|
||||
return Builder(
|
||||
pubkey=pubkey,
|
||||
version=uint8(withdrawal_credentials[0]),
|
||||
execution_address=ExecutionAddress(withdrawal_credentials[12:]),
|
||||
balance=amount,
|
||||
deposit_epoch=get_current_epoch(state),
|
||||
withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
)
|
||||
</spec>
|
||||
|
||||
- name: get_builder_payment_quorum_threshold#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
@@ -2344,20 +2345,19 @@
|
||||
- name: get_builder_withdrawals#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_builder_withdrawals" fork="gloas" hash="d54dd146">
|
||||
<spec fn="get_builder_withdrawals" fork="gloas" hash="35cd32cd">
|
||||
def get_builder_withdrawals(
|
||||
state: BeaconState,
|
||||
withdrawal_index: WithdrawalIndex,
|
||||
prior_withdrawals: Sequence[Withdrawal],
|
||||
) -> Tuple[Sequence[Withdrawal], WithdrawalIndex, uint64]:
|
||||
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
|
||||
assert len(prior_withdrawals) <= withdrawals_limit
|
||||
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
|
||||
|
||||
processed_count: uint64 = 0
|
||||
withdrawals: List[Withdrawal] = []
|
||||
for withdrawal in state.builder_pending_withdrawals:
|
||||
all_withdrawals = prior_withdrawals + withdrawals
|
||||
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
|
||||
has_reached_limit = len(all_withdrawals) == withdrawals_limit
|
||||
if has_reached_limit:
|
||||
break
|
||||
|
||||
@@ -2379,7 +2379,7 @@
|
||||
- name: get_builders_sweep_withdrawals#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_builders_sweep_withdrawals" fork="gloas" hash="04c1cb10">
|
||||
<spec fn="get_builders_sweep_withdrawals" fork="gloas" hash="028d161d">
|
||||
def get_builders_sweep_withdrawals(
|
||||
state: BeaconState,
|
||||
withdrawal_index: WithdrawalIndex,
|
||||
@@ -2387,15 +2387,14 @@
|
||||
) -> Tuple[Sequence[Withdrawal], WithdrawalIndex, uint64]:
|
||||
epoch = get_current_epoch(state)
|
||||
builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
|
||||
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
|
||||
assert len(prior_withdrawals) <= withdrawals_limit
|
||||
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
|
||||
|
||||
processed_count: uint64 = 0
|
||||
withdrawals: List[Withdrawal] = []
|
||||
builder_index = state.next_withdrawal_builder_index
|
||||
for _ in range(builders_limit):
|
||||
all_withdrawals = prior_withdrawals + withdrawals
|
||||
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
|
||||
has_reached_limit = len(all_withdrawals) == withdrawals_limit
|
||||
if has_reached_limit:
|
||||
break
|
||||
|
||||
@@ -2675,7 +2674,7 @@
|
||||
- name: get_data_column_sidecars#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_data_column_sidecars" fork="gloas" hash="abaf4385">
|
||||
<spec fn="get_data_column_sidecars" fork="gloas" hash="c8d64ac9">
|
||||
def get_data_column_sidecars(
|
||||
# [Modified in Gloas:EIP7732]
|
||||
# Removed `signed_block_header`
|
||||
@@ -2683,8 +2682,7 @@
|
||||
beacon_block_root: Root,
|
||||
# [New in Gloas:EIP7732]
|
||||
slot: Slot,
|
||||
# [Modified in Gloas:EIP7732]
|
||||
# Removed `kzg_commitments`
|
||||
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
|
||||
# [Modified in Gloas:EIP7732]
|
||||
# Removed `kzg_commitments_inclusion_proof`
|
||||
cells_and_kzg_proofs: Sequence[
|
||||
@@ -2692,10 +2690,11 @@
|
||||
],
|
||||
) -> Sequence[DataColumnSidecar]:
|
||||
"""
|
||||
Given a beacon block root and the cells/proofs associated with each blob
|
||||
in the corresponding payload, assemble the sidecars which can be
|
||||
distributed to peers.
|
||||
Given a beacon block root and the commitments, cells/proofs associated with
|
||||
each blob in the block, assemble the sidecars which can be distributed to peers.
|
||||
"""
|
||||
assert len(cells_and_kzg_proofs) == len(kzg_commitments)
|
||||
|
||||
sidecars = []
|
||||
for column_index in range(NUMBER_OF_COLUMNS):
|
||||
column_cells, column_proofs = [], []
|
||||
@@ -2703,10 +2702,10 @@
|
||||
column_cells.append(cells[column_index])
|
||||
column_proofs.append(proofs[column_index])
|
||||
sidecars.append(
|
||||
# [Modified in Gloas:EIP7732]
|
||||
DataColumnSidecar(
|
||||
index=column_index,
|
||||
column=column_cells,
|
||||
kzg_commitments=kzg_commitments,
|
||||
kzg_proofs=column_proofs,
|
||||
slot=slot,
|
||||
beacon_block_root=beacon_block_root,
|
||||
@@ -2746,9 +2745,11 @@
|
||||
- name: get_data_column_sidecars_from_block#gloas
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_data_column_sidecars_from_block" fork="gloas" hash="302616d2">
|
||||
<spec fn="get_data_column_sidecars_from_block" fork="gloas" hash="8ac19a18">
|
||||
def get_data_column_sidecars_from_block(
|
||||
signed_block: SignedBeaconBlock,
|
||||
# [New in Gloas:EIP7732]
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
|
||||
cells_and_kzg_proofs: Sequence[
|
||||
Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]
|
||||
],
|
||||
@@ -2761,6 +2762,7 @@
|
||||
return get_data_column_sidecars(
|
||||
beacon_block_root,
|
||||
signed_block.message.slot,
|
||||
blob_kzg_commitments,
|
||||
cells_and_kzg_proofs,
|
||||
)
|
||||
</spec>
|
||||
@@ -2768,7 +2770,7 @@
|
||||
- name: get_data_column_sidecars_from_column_sidecar#fulu
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="fulu" hash="4877148a">
|
||||
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="fulu" hash="4304cdec">
|
||||
def get_data_column_sidecars_from_column_sidecar(
|
||||
sidecar: DataColumnSidecar,
|
||||
cells_and_kzg_proofs: Sequence[
|
||||
@@ -2776,7 +2778,7 @@
|
||||
],
|
||||
) -> Sequence[DataColumnSidecar]:
|
||||
"""
|
||||
Given a data column sidecar and the cells/proofs associated with each blob corresponding
|
||||
Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding
|
||||
to the commitments it contains, assemble all sidecars for distribution to peers.
|
||||
"""
|
||||
assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments)
|
||||
@@ -2792,7 +2794,7 @@
- name: get_data_column_sidecars_from_column_sidecar#gloas
sources: []
spec: |
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="gloas" hash="beb1f94f">
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="gloas" hash="a1052a1c">
def get_data_column_sidecars_from_column_sidecar(
sidecar: DataColumnSidecar,
cells_and_kzg_proofs: Sequence[
@@ -2800,14 +2802,15 @@
],
) -> Sequence[DataColumnSidecar]:
"""
Given a data column sidecar and the cells/proofs associated with each blob
in the corresponding payload, assemble the sidecars which can be
distributed to peers.
Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding
to the commitments it contains, assemble all sidecars for distribution to peers.
"""
# [Modified in Gloas:EIP7732]
assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments)

return get_data_column_sidecars(
sidecar.beacon_block_root,
sidecar.slot,
sidecar.kzg_commitments,
cells_and_kzg_proofs,
)
</spec>
@@ -2817,10 +2820,8 @@
- file: beacon-chain/core/signing/domain.go
search: func Domain(
spec: |
<spec fn="get_domain" fork="phase0" hash="e60c5fbc">
def get_domain(
state: BeaconState, domain_type: DomainType, epoch: Optional[Epoch] = None
) -> Domain:
<spec fn="get_domain" fork="phase0" hash="99ea23f6">
def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch = None) -> Domain:
"""
Return the signature domain (fork version concatenated with domain type) of a message.
"""
@@ -3887,7 +3888,7 @@
- name: get_pending_partial_withdrawals#electra
sources: []
spec: |
<spec fn="get_pending_partial_withdrawals" fork="electra" hash="306047e9">
<spec fn="get_pending_partial_withdrawals" fork="electra" hash="b53b25d7">
def get_pending_partial_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -3898,14 +3899,13 @@
len(prior_withdrawals) + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP,
MAX_WITHDRAWALS_PER_PAYLOAD - 1,
)
assert len(prior_withdrawals) <= withdrawals_limit

processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
for withdrawal in state.pending_partial_withdrawals:
all_withdrawals = prior_withdrawals + withdrawals
is_withdrawable = withdrawal.withdrawable_epoch <= epoch
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if not is_withdrawable or has_reached_limit:
break

@@ -4091,13 +4091,13 @@
- name: get_ptc_assignment#gloas
sources: []
spec: |
<spec fn="get_ptc_assignment" fork="gloas" hash="7fd50097">
<spec fn="get_ptc_assignment" fork="gloas" hash="817acb90">
def get_ptc_assignment(
state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> Optional[Slot]:
"""
Returns the slot during the requested epoch in which the validator with
index ``validator_index`` is a member of the PTC. Returns None if no
index `validator_index` is a member of the PTC. Returns None if no
assignment is found.
"""
next_epoch = Epoch(get_current_epoch(state) + 1)
@@ -4509,7 +4509,7 @@
- name: get_validators_sweep_withdrawals#capella
sources: []
spec: |
<spec fn="get_validators_sweep_withdrawals" fork="capella" hash="59563c2a">
<spec fn="get_validators_sweep_withdrawals" fork="capella" hash="81868c81">
def get_validators_sweep_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -4518,15 +4518,13 @@
epoch = get_current_epoch(state)
validators_limit = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
# There must be at least one space reserved for validator sweep withdrawals
assert len(prior_withdrawals) < withdrawals_limit

processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
validator_index = state.next_withdrawal_validator_index
for _ in range(validators_limit):
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break

@@ -4562,7 +4560,7 @@
- name: get_validators_sweep_withdrawals#electra
sources: []
spec: |
<spec fn="get_validators_sweep_withdrawals" fork="electra" hash="034093ad">
<spec fn="get_validators_sweep_withdrawals" fork="electra" hash="74bbd437">
def get_validators_sweep_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -4571,15 +4569,13 @@
epoch = get_current_epoch(state)
validators_limit = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
# There must be at least one space reserved for validator sweep withdrawals
assert len(prior_withdrawals) < withdrawals_limit

processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
validator_index = state.next_withdrawal_validator_index
for _ in range(validators_limit):
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break

@@ -5735,24 +5731,24 @@
- name: is_valid_indexed_payload_attestation#gloas
sources: []
spec: |
<spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
<spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
def is_valid_indexed_payload_attestation(
state: BeaconState, attestation: IndexedPayloadAttestation
state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
) -> bool:
"""
Check if ``attestation`` is non-empty, has sorted indices, and has
Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
a valid aggregate signature.
"""
# Verify indices are non-empty and sorted
indices = attestation.attesting_indices
indices = indexed_payload_attestation.attesting_indices
if len(indices) == 0 or not indices == sorted(indices):
return False

# Verify aggregate signature
pubkeys = [state.validators[i].pubkey for i in indices]
domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
signing_root = compute_signing_root(attestation.data, domain)
return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
</spec>

- name: is_valid_light_client_header#altair
@@ -6578,15 +6574,13 @@
- name: prepare_execution_payload#capella
sources: []
spec: |
<spec fn="prepare_execution_payload" fork="capella" hash="bdb15c3f">
<spec fn="prepare_execution_payload" fork="capella" hash="998e8b92">
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine,
# [Modified in Capella]
# Removed `pow_chain`
) -> Optional[PayloadId]:
# [Modified in Capella]
# Removed `is_merge_transition_complete` check
@@ -7317,7 +7311,7 @@
- name: process_deposit_request#gloas
sources: []
spec: |
<spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
<spec fn="process_deposit_request" fork="gloas" hash="50ffbd27">
def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
# [New in Gloas:EIP7732]
builder_pubkeys = [b.pubkey for b in state.builders]
@@ -7337,7 +7331,6 @@
deposit_request.withdrawal_credentials,
deposit_request.amount,
deposit_request.signature,
state.slot,
)
return

@@ -7848,7 +7841,7 @@
- name: process_execution_payload#gloas
sources: []
spec: |
<spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
<spec fn="process_execution_payload" fork="gloas" hash="98cceb7d">
def process_execution_payload(
state: BeaconState,
# [Modified in Gloas:EIP7732]
@@ -7878,6 +7871,7 @@
# Verify consistency with the committed bid
committed_bid = state.latest_execution_payload_bid
assert envelope.builder_index == committed_bid.builder_index
assert committed_bid.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
assert committed_bid.prev_randao == payload.prev_randao

# Verify consistency with expected withdrawals
@@ -7891,11 +7885,14 @@
assert payload.parent_hash == state.latest_block_hash
# Verify timestamp
assert payload.timestamp == compute_time_at_slot(state, state.slot)
# Verify commitments are under limit
assert (
len(envelope.blob_kzg_commitments)
<= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
)
# Verify the execution payload is valid
versioned_hashes = [
kzg_commitment_to_versioned_hash(commitment)
# [Modified in Gloas:EIP7732]
for commitment in committed_bid.blob_kzg_commitments
kzg_commitment_to_versioned_hash(commitment) for commitment in envelope.blob_kzg_commitments
]
requests = envelope.execution_requests
assert execution_engine.verify_and_notify_new_payload(
@@ -7936,7 +7933,7 @@
- name: process_execution_payload_bid#gloas
sources: []
spec: |
<spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
<spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
signed_bid = block.body.signed_execution_payload_bid
bid = signed_bid.message
@@ -7955,12 +7952,6 @@
# Verify that the bid signature is valid
assert verify_execution_payload_bid_signature(state, signed_bid)

# Verify commitments are under limit
assert (
len(bid.blob_kzg_commitments)
<= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
)

# Verify that the bid is for the current slot
assert bid.slot == block.slot
# Verify that the bid is for the right parent block
@@ -9535,11 +9526,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="phase0" hash="d2b5fafa">
<spec fn="slash_validator" fork="phase0" hash="85d8d7c9">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9571,11 +9560,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="altair" hash="179ea102">
<spec fn="slash_validator" fork="altair" hash="88f6c284">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9607,11 +9594,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="bellatrix" hash="5964268e">
<spec fn="slash_validator" fork="bellatrix" hash="124f6889">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9643,11 +9628,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="electra" hash="07e584e2">
<spec fn="slash_validator" fork="electra" hash="54b64d21">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -10650,7 +10633,7 @@
- name: upgrade_to_gloas#gloas
sources: []
spec: |
<spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
<spec fn="upgrade_to_gloas" fork="gloas" hash="855ad3f7">
def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
epoch = fulu.get_current_epoch(pre)

@@ -10719,9 +10702,6 @@
payload_expected_withdrawals=[],
)

# [New in Gloas:EIP7732]
onboard_builders_from_pending_deposits(post)

return post
</spec>

@@ -10999,12 +10979,8 @@
- name: verify_data_column_sidecar#gloas
sources: []
spec: |
<spec fn="verify_data_column_sidecar" fork="gloas" hash="71548b68">
def verify_data_column_sidecar(
sidecar: DataColumnSidecar,
# [New in Gloas:EIP7732]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
) -> bool:
<spec fn="verify_data_column_sidecar" fork="gloas" hash="8838c4fd">
def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool:
"""
Verify if the data column sidecar is valid.
"""
@@ -11012,14 +10988,18 @@
if sidecar.index >= NUMBER_OF_COLUMNS:
return False

# [Modified in Gloas:EIP7732]
# A sidecar for zero blobs is invalid
if len(sidecar.column) == 0:
if len(sidecar.kzg_commitments) == 0:
return False

# [Modified in Gloas:EIP7732]
# Check that the sidecar respects the blob limit
epoch = compute_epoch_at_slot(sidecar.slot)
if len(sidecar.kzg_commitments) > get_blob_parameters(epoch).max_blobs_per_block:
return False

# The column length must be equal to the number of commitments/proofs
if len(sidecar.column) != len(kzg_commitments) or len(sidecar.column) != len(
if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len(
sidecar.kzg_proofs
):
return False

@@ -8,9 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -19,9 +17,6 @@ import (

func runExecutionPayloadBidTest(t *testing.T, config string, fork string, objName string, block blockWithSSZObject, sszToState SSZToState, operationFn BlockOperation) {
require.NoError(t, utils.SetConfig(t, config))
cfg := params.BeaconConfig()
params.SetGenesisFork(t, cfg, version.Fulu)

testFolders, testsFolderPath := utils.TestFolders(t, config, fork, "operations/"+objName+"/pyspec_tests")
if len(testFolders) == 0 {
t.Fatalf("No test folders found for %s/%s/%s", config, fork, "operations/"+objName+"/pyspec_tests")

@@ -1568,8 +1568,8 @@ func HydrateExecutionPayloadBid(b *ethpb.ExecutionPayloadBid) *ethpb.ExecutionPa
if b.FeeRecipient == nil {
b.FeeRecipient = make([]byte, fieldparams.FeeRecipientLength)
}
if b.BlobKzgCommitments == nil {
b.BlobKzgCommitments = make([][]byte, 0)
if b.BlobKzgCommitmentsRoot == nil {
b.BlobKzgCommitmentsRoot = make([]byte, fieldparams.RootLength)
}
return b
}
@@ -1636,22 +1636,21 @@ func GenerateTestSignedExecutionPayloadBid(slot primitives.Slot) *ethpb.SignedEx
blockHash := bytesutil.PadTo([]byte{0x03}, fieldparams.RootLength)
prevRandao := bytesutil.PadTo([]byte{0x04}, fieldparams.RootLength)
feeRecipient := bytesutil.PadTo([]byte{0x05}, fieldparams.FeeRecipientLength)
blobKzgCommitment := bytesutil.PadTo([]byte{0x06}, fieldparams.BLSPubkeyLength)
blobKzgRoot := bytesutil.PadTo([]byte{0x06}, fieldparams.RootLength)
signature := bytesutil.PadTo([]byte{0x07}, fieldparams.BLSSignatureLength)

return &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
Slot: slot,
BuilderIndex: 1,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
GasLimit: 30000000,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
Value: 1000000,
ExecutionPayment: 2000000,
BlobKzgCommitments: [][]byte{blobKzgCommitment},
Slot: slot,
BuilderIndex: 1,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
GasLimit: 30000000,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
Value: 1000000,
BlobKzgCommitmentsRoot: blobKzgRoot,
},
Signature: signature,
}

@@ -413,7 +413,6 @@ func TestGenerateTestSignedExecutionPayloadBid(t *testing.T) {
require.Equal(t, primitives.BuilderIndex(1), bid.Message.BuilderIndex)
require.Equal(t, uint64(30000000), bid.Message.GasLimit)
require.Equal(t, primitives.Gwei(1000000), bid.Message.Value)
require.Equal(t, primitives.Gwei(2000000), bid.Message.ExecutionPayment)

// Verify fields are populated
require.NotNil(t, bid.Message.ParentBlockHash)
@@ -421,8 +420,7 @@ func TestGenerateTestSignedExecutionPayloadBid(t *testing.T) {
require.NotNil(t, bid.Message.BlockHash)
require.NotNil(t, bid.Message.PrevRandao)
require.NotNil(t, bid.Message.FeeRecipient)
require.NotNil(t, bid.Message.BlobKzgCommitments)
require.Equal(t, 1, len(bid.Message.BlobKzgCommitments))
require.NotNil(t, bid.Message.BlobKzgCommitmentsRoot)

// Verify HashTreeRoot works
_, err := bid.HashTreeRoot()

@@ -559,12 +559,12 @@ func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (
},
ProposerLookahead: make([]uint64, 64),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Builders: make([]*ethpb.Builder, 0),
ExecutionPayloadAvailability: make([]byte, 1024),