mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-30 23:58:23 -05:00)

Compare commits: e2e-debugg...move-kzg-c (6 commits)

- 7aa8c86ac5
- 55fe85c887
- 31f77567dd
- a7fdd11777
- 919bd5d6aa
- 0476eeda57
@@ -519,7 +519,7 @@ type ExecutionPayloadBid struct {
	Slot                   string   `json:"slot"`
	Value                  string   `json:"value"`
	ExecutionPayment       string   `json:"execution_payment"`
	BlobKzgCommitmentsRoot string   `json:"blob_kzg_commitments_root"`
	BlobKzgCommitments     []string `json:"blob_kzg_commitments"`
}

type SignedExecutionPayloadBid struct {
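For reference, a minimal sketch of what the new JSON shape looks like on the wire, using a trimmed-down copy of the struct above; the sample values are made up:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Trimmed-down copy of the API struct above, limited to the fields in this hunk.
type executionPayloadBid struct {
	Slot                   string   `json:"slot"`
	Value                  string   `json:"value"`
	ExecutionPayment       string   `json:"execution_payment"`
	BlobKzgCommitmentsRoot string   `json:"blob_kzg_commitments_root"`
	BlobKzgCommitments     []string `json:"blob_kzg_commitments"`
}

func main() {
	bid := executionPayloadBid{
		Slot:               "12",
		Value:              "0",
		ExecutionPayment:   "0",
		BlobKzgCommitments: []string{"0x" + strings.Repeat("05", 48)}, // one 48-byte commitment, hex-encoded
	}
	out, err := json.MarshalIndent(bid, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}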
@@ -2939,6 +2939,10 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}

func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
	blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
	for i := range b.BlobKzgCommitments {
		blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
	}
	return &ExecutionPayloadBid{
		ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
		ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
@@ -2950,7 +2954,7 @@ func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayl
		Slot:                   fmt.Sprintf("%d", b.Slot),
		Value:                  fmt.Sprintf("%d", b.Value),
		ExecutionPayment:       fmt.Sprintf("%d", b.ExecutionPayment),
		BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
		BlobKzgCommitments:     blobKzgCommitments,
	}
}
@@ -3187,9 +3191,17 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
	if err != nil {
		return nil, server.NewDecodeError(err, "ExecutionPayment")
	}
	blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
	err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
	if err != nil {
		return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
		return nil, server.NewDecodeError(err, "BlobKzgCommitments")
	}
	blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
	for i, commitment := range b.BlobKzgCommitments {
		kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
		if err != nil {
			return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
		}
		blobKzgCommitments[i] = kzg
	}
	return &eth.ExecutionPayloadBid{
		ParentBlockHash: parentBlockHash,
@@ -3202,7 +3214,7 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
		Slot:                   primitives.Slot(slot),
		Value:                  primitives.Gwei(value),
		ExecutionPayment:       primitives.Gwei(executionPayment),
		BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
		BlobKzgCommitments:     blobKzgCommitments,
	}, nil
}
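The conversion above bounds the list length before decoding each entry as fixed-length hex. A self-contained sketch of the same validation pattern using only the standard library; the 4096 limit is a stand-in for `fieldparams.MaxBlobCommitmentsPerBlock`, and 48 bytes matches `fieldparams.BLSPubkeyLength`:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

const (
	commitmentLen = 48   // KZG commitments are 48 bytes, the same size as BLS pubkeys
	maxPerBlock   = 4096 // stand-in for fieldparams.MaxBlobCommitmentsPerBlock
)

// decodeCommitments mirrors the diff's order of checks: bound the list size
// before decoding, then enforce a fixed byte length per element.
func decodeCommitments(in []string) ([][]byte, error) {
	if len(in) > maxPerBlock {
		return nil, fmt.Errorf("too many commitments: %d > %d", len(in), maxPerBlock)
	}
	out := make([][]byte, len(in))
	for i, s := range in {
		b, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
		if err != nil {
			return nil, fmt.Errorf("BlobKzgCommitments[%d]: %w", i, err)
		}
		if len(b) != commitmentLen {
			return nil, fmt.Errorf("BlobKzgCommitments[%d]: got %d bytes, want %d", i, len(b), commitmentLen)
		}
		out[i] = b
	}
	return out, nil
}

func main() {
	cs, err := decodeCommitments([]string{"0x" + strings.Repeat("05", commitmentLen)})
	fmt.Println(len(cs), err) // 1 <nil>
}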
@@ -17,7 +17,7 @@ import (
)

// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// Spec v1.7.0-alpha.2 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
//	signed_bid = block.body.signed_execution_payload_bid
@@ -31,6 +31,7 @@ import (
//	assert is_active_builder(state, builder_index)
//	assert can_builder_cover_bid(state, builder_index, amount)
//	assert verify_execution_payload_bid_signature(state, signed_bid)
//	assert len(bid.blob_kzg_commitments) <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
//	assert bid.slot == block.slot
//	assert bid.parent_block_hash == state.latest_block_hash
//	assert bid.parent_block_root == block.parent_root
@@ -86,6 +87,11 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
		}
	}

	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
	if len(bid.BlobKzgCommitments()) > maxBlobsPerBlock {
		return fmt.Errorf("bid has %d blob KZG commitments over max %d", len(bid.BlobKzgCommitments()), maxBlobsPerBlock)
	}

	if err := validateBidConsistency(st, bid, block); err != nil {
		return errors.Wrap(err, "bid consistency validation failed")
	}
@@ -184,6 +184,28 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
	return out
}

func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
	max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
	if count > max {
		count = max
	}
	commitments := make([][]byte, count)
	for i := range commitments {
		commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
	}
	return commitments
}

func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
	max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
	count := max + 1
	commitments := make([][]byte, count)
	for i := range commitments {
		commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
	}
	return commitments
}

func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
	slot := primitives.Slot(12)
	proposerIdx := primitives.ValidatorIndex(0)
@@ -203,7 +225,7 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
		Slot:                   slot,
		Value:                  0,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
	}
	signed := &ethpb.SignedExecutionPayloadBid{
@@ -244,7 +266,7 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
		Slot:                   slot,
		Value:                  10,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xDD}, 20),
	}
	signed := &ethpb.SignedExecutionPayloadBid{
@@ -289,7 +311,7 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
		Slot:                   slot,
		Value:                  500_000,
		ExecutionPayment:       1,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
	}

@@ -350,7 +372,7 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
		Slot:                   slot,
		Value:                  10,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0x06}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -403,7 +425,7 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
		Slot:                   slot,
		Value:                  25,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -445,7 +467,7 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
		Slot:                   slot,
		Value:                  10,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
	}
	// Use an invalid signature.
@@ -463,6 +485,42 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
	require.ErrorContains(t, "bid signature validation failed", err)
}

func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
	slot := primitives.Slot(9)
	proposerIdx := primitives.ValidatorIndex(0)
	builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
	randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
	latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
	pubKey := [48]byte{}
	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)

	bid := &ethpb.ExecutionPayloadBid{
		ParentBlockHash:    latestHash[:],
		ParentBlockRoot:    bytes.Repeat([]byte{0xCC}, 32),
		BlockHash:          bytes.Repeat([]byte{0xDD}, 32),
		PrevRandao:         randao[:],
		BuilderIndex:       builderIdx,
		Slot:               slot,
		BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
		FeeRecipient:       bytes.Repeat([]byte{0xFF}, 20),
	}
	signed := &ethpb.SignedExecutionPayloadBid{
		Message:   bid,
		Signature: common.InfiniteSignature[:],
	}

	block := stubBlock{
		slot:       slot,
		proposer:   proposerIdx,
		parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
		body:       stubBlockBody{signedBid: signed},
		v:          version.Gloas,
	}

	err := ProcessExecutionPayloadBid(state, block)
	require.ErrorContains(t, "blob KZG commitments over max", err)
}

func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
	slot := primitives.Slot(10)
	builderIdx := primitives.BuilderIndex(1)
@@ -487,7 +545,7 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
		Slot:                   slot + 1, // mismatch
		Value:                  1,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0xDD}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -529,7 +587,7 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
		Slot:                   slot,
		Value:                  1,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -572,7 +630,7 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
		Slot:                   slot,
		Value:                  1,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -614,7 +672,7 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
		Slot:                   slot,
		Value:                  1,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
		BlobKzgCommitments:     blobCommitmentsForSlot(slot, 1),
		FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
	}
	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -114,17 +114,32 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
	}

	committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
	out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))

	for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
		committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
	selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
	var i uint64
	for uint64(len(selected)) < fieldparams.PTCSize {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}

		for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
			if uint64(len(selected)) >= fieldparams.PTCSize {
				break
			}

			committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
			}

			selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
			}
		}
		out = append(out, committee...)
	}

	return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
	return selected, nil
}

// ptcSeed computes the seed for the payload timeliness committee.
@@ -148,33 +163,39 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
//	if compute_balance_weighted_acceptance(state, indices[next], seed, i):
//	    selected.append(indices[next])
//	i += 1
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
	if len(candidates) == 0 {
		return nil, errors.New("no candidates for balance weighted selection")
	}

func selectByBalanceFill(
	ctx context.Context,
	st state.ReadOnlyBeaconState,
	candidates []primitives.ValidatorIndex,
	seed [32]byte,
	selected []primitives.ValidatorIndex,
	i uint64,
) ([]primitives.ValidatorIndex, uint64, error) {
	hashFunc := hash.CustomSHA256Hasher()
	// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
	var buf [40]byte
	copy(buf[:], seed[:])
	maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra

	selected := make([]primitives.ValidatorIndex, 0, count)
	total := uint64(len(candidates))
	for i := uint64(0); uint64(len(selected)) < count; i++ {
	for _, idx := range candidates {
		if ctx.Err() != nil {
			return nil, ctx.Err()
			return nil, i, ctx.Err()
		}
		idx := candidates[i%total]

		ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
		if err != nil {
			return nil, err
			return nil, i, err
		}
		if ok {
			selected = append(selected, idx)
		}
		if uint64(len(selected)) == fieldparams.PTCSize {
			break
		}
		i++
	}
	return selected, nil

	return selected, i, nil
}

// acceptByBalance determines if a validator is accepted based on its effective balance.
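The loop above defers the actual coin flip to acceptByBalance, whose body is outside this diff. Below is a rough, self-contained sketch of Electra-style balance-weighted acceptance; the 16-bit random value and the hash-buffer layout are assumptions for illustration, not code from this PR:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	maxRandomValue = 1<<16 - 1        // assumed Electra-style 16-bit random value
	maxBalance     = 2048_000_000_000 // MaxEffectiveBalanceElectra in Gwei (2048 ETH)
)

// accept returns true with probability effectiveBalance/maxBalance, derived
// deterministically from (seed, round). Layout assumption: hash(seed || round/16),
// then take the (round%16)-th 2-byte slice as the random value.
func accept(seed [32]byte, round uint64, effectiveBalance uint64) bool {
	var buf [40]byte
	copy(buf[:], seed[:])
	binary.LittleEndian.PutUint64(buf[32:], round/16)
	h := sha256.Sum256(buf[:])
	offset := (round % 16) * 2
	random := uint64(binary.LittleEndian.Uint16(h[offset : offset+2]))
	return effectiveBalance*maxRandomValue >= maxBalance*random
}

func main() {
	seed := sha256.Sum256([]byte("ptc-seed"))
	hits := 0
	for i := uint64(0); i < 1000; i++ {
		if accept(seed, i, 32_000_000_000) { // a 32 ETH validator
			hits++
		}
	}
	fmt.Printf("accepted %d/1000 rounds (expect roughly %d)\n", hits, 1000*32/2048)
}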
@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
			BlockHash:              make([]byte, 32),
			PrevRandao:             make([]byte, 32),
			FeeRecipient:           make([]byte, 20),
			BlobKzgCommitmentsRoot: make([]byte, 32),
			BlobKzgCommitments:     [][]byte{make([]byte, 48)},
		},
		Eth1Data: &ethpb.Eth1Data{
			DepositRoot: make([]byte, 32),
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
	return subscribed
}

func TestUpdateCustodyInfo(t *testing.T) {
	ctx := t.Context()
@@ -575,7 +575,7 @@ func (s *Service) beaconEndpoints(
			name: namespace + ".PublishBlockV2",
			middleware: []middleware.Middleware{
				middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
				middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
				middleware.AcceptEncodingHeaderHandler(),
			},
			handler: server.PublishBlockV2,
@@ -586,7 +586,7 @@ func (s *Service) beaconEndpoints(
			name: namespace + ".PublishBlindedBlockV2",
			middleware: []middleware.Middleware{
				middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
				middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
				middleware.AcceptEncodingHeaderHandler(),
			},
			handler: server.PublishBlindedBlockV2,
@@ -26,8 +26,8 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
	p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -48,6 +48,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
        "@org_golang_google_grpc//reflection:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -35,18 +35,19 @@ import (
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
// version information, and services the node implements and runs.
type Server struct {
	LogsStreamer         logs.Streamer
	StreamLogsBufferSize int
	SyncChecker          sync.Checker
	Server               *grpc.Server
	BeaconDB             db.ReadOnlyDatabase
	PeersFetcher         p2p.PeersProvider
	PeerManager          p2p.PeerManager
	GenesisTimeFetcher   blockchain.TimeFetcher
	GenesisFetcher       blockchain.GenesisFetcher
	POWChainInfoFetcher  execution.ChainInfoFetcher
	BeaconMonitoringHost string
	BeaconMonitoringPort int
	LogsStreamer          logs.Streamer
	StreamLogsBufferSize  int
	SyncChecker           sync.Checker
	Server                *grpc.Server
	BeaconDB              db.ReadOnlyDatabase
	PeersFetcher          p2p.PeersProvider
	PeerManager           p2p.PeerManager
	GenesisTimeFetcher    blockchain.TimeFetcher
	GenesisFetcher        blockchain.GenesisFetcher
	POWChainInfoFetcher   execution.ChainInfoFetcher
	BeaconMonitoringHost  string
	BeaconMonitoringPort  int
	OptimisticModeFetcher blockchain.OptimisticModeFetcher
}

// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
@@ -61,21 +62,28 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
	ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
	defer cancel() // Important to avoid a context leak

	if ns.SyncChecker.Synced() {
	// Check optimistic status - validators should not participate when optimistic
	isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
	}

	if ns.SyncChecker.Synced() && !isOptimistic {
		return &empty.Empty{}, nil
	}
	if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
		if request.SyncingStatus != 0 {
			// override the 200 success with the provided request status
			if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
				return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
			}
			return &empty.Empty{}, nil
		}
		// Set header for REST API clients (via gRPC-gateway)
		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
		}
		return &empty.Empty{}, nil
		return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
	}
	if isOptimistic {
		// Set header for REST API clients (via gRPC-gateway)
		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
		}
		return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
	}
	return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
}
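For clients, the change above splits one generic failure into three distinct outcomes. A small self-contained sketch of the resulting error surface; the mapping comes from the diff, while the printing scaffold is illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Outcomes of GetHealth after this change, in the order the handler checks them.
	outcomes := []struct {
		state string
		err   error
	}{
		{"synced, not optimistic", nil},
		{"syncing (or still initializing)", status.Error(codes.Unavailable, "node is syncing")},
		{"synced but optimistic", status.Error(codes.Unavailable, "node is optimistic")},
		{"neither synced nor syncing", status.Error(codes.Unavailable, "service unavailable")},
	}
	for _, o := range outcomes {
		// status.Code(nil) reports codes.OK for the healthy case.
		fmt.Printf("%-32s -> grpc code %v (err: %v)\n", o.state, status.Code(o.err), o.err)
	}
}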
@@ -2,6 +2,7 @@ package node

import (
	"errors"
	"maps"
	"testing"
	"time"

@@ -21,6 +22,7 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/reflection"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
@@ -187,32 +189,71 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
	assert.Equal(t, errStr, res.CurrentConnectionError)
}

// mockServerTransportStream implements grpc.ServerTransportStream for testing
type mockServerTransportStream struct {
	headers map[string][]string
}

func (m *mockServerTransportStream) Method() string { return "" }
func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
	maps.Copy(m.headers, md)
	return nil
}
func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }

func TestNodeServer_GetHealth(t *testing.T) {
	tests := []struct {
		name         string
		input        *mockSync.Sync
		customStatus uint64
		isOptimistic bool
		wantedErr    string
	}{
		{
			name:  "happy path",
			input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
			name:         "happy path - synced and not optimistic",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
			isOptimistic: false,
		},
		{
			name:      "syncing",
			input:     &mockSync.Sync{IsSyncing: false},
			wantedErr: "service unavailable",
			name:         "returns error when not synced and not syncing",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: false},
			isOptimistic: false,
			wantedErr:    "service unavailable",
		},
		{
			name:         "returns error when syncing",
			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
			isOptimistic: false,
			wantedErr:    "node is syncing",
		},
		{
			name:         "returns error when synced but optimistic",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
			isOptimistic: true,
			wantedErr:    "node is optimistic",
		},
		{
			name:         "returns error when syncing and optimistic",
			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
			isOptimistic: true,
			wantedErr:    "node is syncing",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			server := grpc.NewServer()
			ns := &Server{
				SyncChecker: tt.input,
				SyncChecker:           tt.input,
				OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
			}
			ethpb.RegisterNodeServer(server, ns)
			reflection.Register(server)
			_, err := ns.GetHealth(t.Context(), &ethpb.HealthRequest{SyncingStatus: tt.customStatus})

			// Create context with mock transport stream so grpc.SetHeader works
			stream := &mockServerTransportStream{headers: make(map[string][]string)}
			ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)

			_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{})
			if tt.wantedErr == "" {
				require.NoError(t, err)
				return
@@ -259,18 +259,19 @@ func NewService(ctx context.Context, cfg *Config) *Service {
	}
	s.validatorServer = validatorServer
	nodeServer := &nodev1alpha1.Server{
		LogsStreamer:         logs.NewStreamServer(),
		StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
		BeaconDB:             s.cfg.BeaconDB,
		Server:               s.grpcServer,
		SyncChecker:          s.cfg.SyncService,
		GenesisTimeFetcher:   s.cfg.GenesisTimeFetcher,
		PeersFetcher:         s.cfg.PeersFetcher,
		PeerManager:          s.cfg.PeerManager,
		GenesisFetcher:       s.cfg.GenesisFetcher,
		POWChainInfoFetcher:  s.cfg.ExecutionChainInfoFetcher,
		BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
		BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
		LogsStreamer:          logs.NewStreamServer(),
		StreamLogsBufferSize:  1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
		BeaconDB:              s.cfg.BeaconDB,
		Server:                s.grpcServer,
		SyncChecker:           s.cfg.SyncService,
		GenesisTimeFetcher:    s.cfg.GenesisTimeFetcher,
		PeersFetcher:          s.cfg.PeersFetcher,
		PeerManager:           s.cfg.PeerManager,
		GenesisFetcher:        s.cfg.GenesisFetcher,
		POWChainInfoFetcher:   s.cfg.ExecutionChainInfoFetcher,
		BeaconMonitoringHost:  s.cfg.BeaconMonitoringHost,
		BeaconMonitoringPort:  s.cfg.BeaconMonitoringPort,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
	}
	beaconChainServer := &beaconv1alpha1.Server{
		Ctx: s.ctx,
@@ -8,6 +8,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
)
@@ -82,7 +83,7 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
	parentBlockRoot := h.ParentBlockRoot()
	blockHash := h.BlockHash()
	randao := h.PrevRandao()
	blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
	blobKzgCommitments := bytesutil.SafeCopy2dBytes(h.BlobKzgCommitments())
	feeRecipient := h.FeeRecipient()
	b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
		ParentBlockHash: parentBlockHash[:],
@@ -94,7 +95,7 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
		Slot:                   h.Slot(),
		Value:                  h.Value(),
		ExecutionPayment:       h.ExecutionPayment(),
		BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
		BlobKzgCommitments:     blobKzgCommitments,
		FeeRecipient:           feeRecipient[:],
	}
	b.markFieldAsDirty(types.LatestExecutionPayloadBid)
@@ -18,7 +18,7 @@ type testExecutionPayloadBid struct {
	parentBlockRoot        [32]byte
	blockHash              [32]byte
	prevRandao             [32]byte
	blobKzgCommitmentsRoot [32]byte
	blobKzgCommitments     [][]byte
	feeRecipient           [20]byte
	gasLimit               uint64
	builderIndex           primitives.BuilderIndex
@@ -40,9 +40,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
	return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte           { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool                      { return false }
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte       { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool                  { return false }

func TestSetExecutionPayloadBid(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -57,7 +57,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
		parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
		blockHash       = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
		prevRandao      = [32]byte(bytes.Repeat([]byte{0x11}, 32))
		blobRoot        = [32]byte(bytes.Repeat([]byte{0x22}, 32))
		blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
		feeRecipient    [20]byte
	)
	copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -70,7 +70,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
		parentBlockRoot:        parentBlockRoot,
		blockHash:              blockHash,
		prevRandao:             prevRandao,
		blobKzgCommitmentsRoot: blobRoot,
		blobKzgCommitments:     blobCommitments,
		feeRecipient:           feeRecipient,
		gasLimit:               123,
		builderIndex:           7,
@@ -86,7 +86,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
	require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
	require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
	require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
	require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
	require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
	require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
	require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
	require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
	sc: signatureCache,
	sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
	hsp: &mockHeadStateProvider{
		headRoot: parentRoot[:], // Same as parent
		headSlot: 32, // Epoch 1
		headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
		headStateReadOnly: nil, // Should not use ReadOnly path
		headRoot:          parentRoot[:],    // Same as parent
		headSlot:          32,               // Epoch 1
		headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
		headStateReadOnly: nil,              // Should not use ReadOnly path
	},
	fc: &mockForkchoicer{
		// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
	// Wrap to detect HeadState call
	originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
	wrappedHsp := &mockHeadStateProvider{
		headRoot: originalHsp.headRoot,
		headSlot: originalHsp.headSlot,
		headRoot:  originalHsp.headRoot,
		headSlot:  originalHsp.headSlot,
		headState: originalHsp.headState,
	}
	initializer.shared.hsp = &headStateCallTracker{
@@ -0,0 +1,6 @@
### Added

- Added a new proofCollector type to ssz-query.

### Ignored

- Added tests covering production of a Merkle proof from a Phase0 beacon state, benchmarked against a real Hoodi beacon state (Fulu version).

changelog/james-prysm_update-health-endpoint.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- The gRPC health endpoint now returns an error when the node is syncing or optimistic, indicating that it is unavailable.

changelog/jtraglia-add-specrefs-readme.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Added a README for maintaining specrefs.

changelog/jtraglia-nightly-reftests-with-run-id.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- The ability to download the nightly reference tests from a specific day.

changelog/terencechain_gloas-ptc-sampling.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed
- Sample the PTC per committee to reduce allocations.

changelog/tt_move-kzg-commitments.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Moved blob KZG commitments into `ExecutionPayloadBid` and removed them from `ExecutionPayloadEnvelope` for Gloas.
@@ -671,7 +671,7 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
			BlockHash:              make([]byte, fieldparams.RootLength),
			PrevRandao:             make([]byte, fieldparams.RootLength),
			FeeRecipient:           make([]byte, 20),
			BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
			BlobKzgCommitments:     [][]byte{make([]byte, fieldparams.BLSPubkeyLength)},
		},
		Signature: make([]byte, fieldparams.BLSSignatureLength),
	},
@@ -43,11 +43,16 @@ func (h executionPayloadBidGloas) IsNil() bool {
		len(h.payload.ParentBlockRoot) != 32 ||
		len(h.payload.BlockHash) != 32 ||
		len(h.payload.PrevRandao) != 32 ||
		len(h.payload.BlobKzgCommitmentsRoot) != 32 ||
		len(h.payload.FeeRecipient) != 20 {
		return true
	}

	for _, commitment := range h.payload.BlobKzgCommitments {
		if len(commitment) != 48 {
			return true
		}
	}

	return false
}

@@ -131,9 +136,9 @@ func (h executionPayloadBidGloas) ExecutionPayment() primitives.Gwei {
	return primitives.Gwei(h.payload.ExecutionPayment)
}

// BlobKzgCommitmentsRoot returns the root of the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitmentsRoot() [32]byte {
	return [32]byte(h.payload.BlobKzgCommitmentsRoot)
// BlobKzgCommitments returns the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitments() [][]byte {
	return h.payload.BlobKzgCommitments
}

// FeeRecipient returns the execution address that will receive the builder payment.
@@ -24,7 +24,7 @@ func validExecutionPayloadBid() *ethpb.ExecutionPayloadBid {
		Slot:                   6,
		Value:                  7,
		ExecutionPayment:       8,
		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
		BlobKzgCommitments:     [][]byte{bytes.Repeat([]byte{0x05}, 48)},
		FeeRecipient:           bytes.Repeat([]byte{0x06}, 20),
	}
}
@@ -52,8 +52,8 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
			mutate: func(b *ethpb.ExecutionPayloadBid) { b.PrevRandao = []byte{0x04} },
		},
		{
			name:   "blob kzg commitments root",
			mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitmentsRoot = []byte{0x05} },
			name:   "blob kzg commitments length",
			mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitments = [][]byte{[]byte{0x05}} },
		},
		{
			name: "fee recipient",
@@ -85,7 +85,7 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x02}, 32)), wrapped.ParentBlockRoot())
		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x03}, 32)), wrapped.BlockHash())
		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x04}, 32)), wrapped.PrevRandao())
		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x05}, 32)), wrapped.BlobKzgCommitmentsRoot())
		assert.DeepEqual(t, [][]byte{bytes.Repeat([]byte{0x05}, 48)}, wrapped.BlobKzgCommitments())
		assert.DeepEqual(t, [20]byte(bytes.Repeat([]byte{0x06}, 20)), wrapped.FeeRecipient())
	})
}
@@ -22,7 +22,7 @@ type ROExecutionPayloadBid interface {
	Slot() primitives.Slot
	Value() primitives.Gwei
	ExecutionPayment() primitives.Gwei
	BlobKzgCommitmentsRoot() [32]byte
	BlobKzgCommitments() [][]byte
	FeeRecipient() [20]byte
	IsNil() bool
}
@@ -163,3 +163,18 @@ func Uint256ToSSZBytes(num string) ([]byte, error) {
	}
	return PadTo(ReverseByteOrder(uint256.Bytes()), 32), nil
}

// PutLittleEndian writes an unsigned integer value in little-endian format.
// Supports sizes 1, 2, 4, or 8 bytes for uint8/16/32/64 respectively.
func PutLittleEndian(dst []byte, val uint64, size int) {
	switch size {
	case 1:
		dst[0] = byte(val)
	case 2:
		binary.LittleEndian.PutUint16(dst, uint16(val))
	case 4:
		binary.LittleEndian.PutUint32(dst, uint32(val))
	case 8:
		binary.LittleEndian.PutUint64(dst, val)
	}
}
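A quick usage sketch of the new helper, assuming the repo's bytesutil import path as in the diff; the output bytes are easy to verify by hand:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
)

func main() {
	buf := make([]byte, 8)
	bytesutil.PutLittleEndian(buf[:2], 0x0102, 2) // uint16
	fmt.Printf("%x\n", buf[:2])                   // 0201: least-significant byte first
	bytesutil.PutLittleEndian(buf, 1, 8)          // uint64
	fmt.Printf("%x\n", buf)                       // 0100000000000000
}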
@@ -9,7 +9,9 @@ go_library(
|
||||
"container.go",
|
||||
"generalized_index.go",
|
||||
"list.go",
|
||||
"merkle_proof.go",
|
||||
"path.go",
|
||||
"proof_collector.go",
|
||||
"query.go",
|
||||
"ssz_info.go",
|
||||
"ssz_object.go",
|
||||
@@ -20,7 +22,12 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz/query",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -29,15 +36,24 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"generalized_index_test.go",
|
||||
"merkle_proof_test.go",
|
||||
"path_test.go",
|
||||
"proof_collector_test.go",
|
||||
"query_test.go",
|
||||
"tag_parser_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//encoding/ssz/query/testutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/ssz_query/testing:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
encoding/ssz/query/merkle_proof.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package query

import (
	"fmt"
	"reflect"

	fastssz "github.com/prysmaticlabs/fastssz"
)

// Prove is the entrypoint to generate an SSZ Merkle proof for the given generalized index.
// Parameters:
//   - gindex: the generalized index of the node to prove inclusion for.
// Returns:
//   - fastssz.Proof: the Merkle proof containing the leaf, index, and sibling hashes.
//   - error: any error encountered during proof generation.
func (info *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error) {
	if info == nil {
		return nil, fmt.Errorf("nil SszInfo")
	}

	collector := newProofCollector()
	collector.addTarget(gindex)

	// info.source is guaranteed to be valid and dereferenced by AnalyzeObject
	v := reflect.ValueOf(info.source).Elem()

	// Start the merkleization and proof collection process.
	// In SSZ generalized indices, the root is always at index 1.
	if _, err := collector.merkleize(info, v, 1); err != nil {
		return nil, err
	}

	return collector.toProof()
}
encoding/ssz/query/merkle_proof_test.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package query_test

import (
	"testing"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	ssz "github.com/prysmaticlabs/fastssz"
)

func TestProve_FixedTestContainer(t *testing.T) {
	obj := createFixedTestContainer()

	tests := []string{
		".field_uint32",
		".nested.value2",
		".vector_field[3]",
		".bitvector64_field",
		".trailing_field",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_VariableTestContainer(t *testing.T) {
	obj := createVariableTestContainer()

	tests := []string{
		".leading_field",
		".field_list_uint64[2]",
		"len(field_list_uint64)",
		".nested.nested_list_field[1]",
		".variable_container_list[0].inner_1.field_list_uint64[1]",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_BeaconBlock(t *testing.T) {
	randaoReveal := make([]byte, 96)
	for i := range randaoReveal {
		randaoReveal[i] = 0x42
	}
	root32 := make([]byte, 32)
	for i := range root32 {
		root32[i] = 0x24
	}
	sig := make([]byte, 96)
	for i := range sig {
		sig[i] = 0x99
	}

	att := &eth.Attestation{
		AggregationBits: bitfield.Bitlist{0x01},
		Data: &eth.AttestationData{
			Slot:            1,
			CommitteeIndex:  1,
			BeaconBlockRoot: root32,
			Source: &eth.Checkpoint{
				Epoch: 1,
				Root:  root32,
			},
			Target: &eth.Checkpoint{
				Epoch: 1,
				Root:  root32,
			},
		},
		Signature: sig,
	}

	b := util.NewBeaconBlock()
	b.Block.Slot = 123
	b.Block.Body.RandaoReveal = randaoReveal
	b.Block.Body.Attestations = []*eth.Attestation{att}

	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)

	protoBlock, err := sb.Block().Proto()
	require.NoError(t, err)

	obj, ok := protoBlock.(query.SSZObject)
	require.Equal(t, true, ok, "block proto does not implement query.SSZObject")

	tests := []string{
		".slot",
		".body.randao_reveal",
		".body.attestations[0].data.slot",
		"len(body.attestations)",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_BeaconState(t *testing.T) {
	st, _ := util.DeterministicGenesisState(t, 16)
	require.NoError(t, st.SetSlot(primitives.Slot(42)))

	sszObj, ok := st.ToProtoUnsafe().(query.SSZObject)
	require.Equal(t, true, ok, "state proto does not implement query.SSZObject")

	tests := []string{
		".slot",
		".latest_block_header",
		".validators[0].effective_balance",
		"len(validators)",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, sszObj, tc)
		})
	}
}

// proveAndVerify is a helper that analyzes an object, generates a Merkle proof for the given path,
// and verifies the proof against the object's root.
func proveAndVerify(t *testing.T, obj query.SSZObject, pathStr string) {
	t.Helper()

	info, err := query.AnalyzeObject(obj)
	require.NoError(t, err)

	path, err := query.ParsePath(pathStr)
	require.NoError(t, err)

	gi, err := query.GetGeneralizedIndexFromPath(info, path)
	require.NoError(t, err)

	proof, err := info.Prove(gi)
	require.NoError(t, err)
	require.Equal(t, int(gi), proof.Index)

	root, err := obj.HashTreeRoot()
	require.NoError(t, err)

	ok, err := ssz.VerifyProof(root[:], proof)
	require.NoError(t, err)
	require.Equal(t, true, ok, "merkle proof verification failed")

	require.Equal(t, 32, len(proof.Leaf))
	for i, h := range proof.Hashes {
		require.Equal(t, 32, len(h), "proof hash %d is not 32 bytes", i)
	}
}
encoding/ssz/query/proof_collector.go (new file, 672 lines)
@@ -0,0 +1,672 @@
package query

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math/bits"
	"reflect"
	"runtime"
	"slices"
	"sync"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/container/trie"
	"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
	"github.com/OffchainLabs/prysm/v7/math"
	fastssz "github.com/prysmaticlabs/fastssz"
)

// proofCollector collects sibling hashes and leaves needed for Merkle proofs.
//
// Multiproof-ready design:
//   - requiredSiblings/requiredLeaves store which gindices we want to collect (registered before merkleization).
//   - siblings/leaves store the actual collected hashes.
//
// Concurrency:
//   - required* maps are read-only during merkleization.
//   - siblings/leaves writes are protected by mutex.
type proofCollector struct {
	sync.Mutex

	// Required gindices (registered before merkleization)
	requiredSiblings map[uint64]struct{}
	requiredLeaves   map[uint64]struct{}

	// Collected hashes
	siblings map[uint64][32]byte
	leaves   map[uint64][32]byte
}

func newProofCollector() *proofCollector {
	return &proofCollector{
		requiredSiblings: make(map[uint64]struct{}),
		requiredLeaves:   make(map[uint64]struct{}),
		siblings:         make(map[uint64][32]byte),
		leaves:           make(map[uint64][32]byte),
	}
}

func (pc *proofCollector) reset() {
	pc.Lock()
	defer pc.Unlock()

	pc.requiredSiblings = make(map[uint64]struct{})
	pc.requiredLeaves = make(map[uint64]struct{})
	pc.siblings = make(map[uint64][32]byte)
	pc.leaves = make(map[uint64][32]byte)
}

// addTarget registers the target leaf and its required sibling nodes for proof construction.
// Registration should happen before merkleization begins.
func (pc *proofCollector) addTarget(gindex uint64) {
	pc.Lock()
	defer pc.Unlock()

	pc.requiredLeaves[gindex] = struct{}{}

	// Walk from the target leaf up to (but not including) the root (gindex=1).
	// At each step, register the sibling node required to prove inclusion.
	nodeGindex := gindex
	for nodeGindex > 1 {
		siblingGindex := nodeGindex ^ 1 // flip the last bit: left<->right sibling
		pc.requiredSiblings[siblingGindex] = struct{}{}

		// Move to parent
		nodeGindex /= 2
	}
}
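// Illustration (worked example, not part of the original file): for a target at
// gindex 11, the loop above registers siblings 10 (= 11^1), 4 (= 5^1), and
// 3 (= 2^1), following the parent chain 11 -> 5 -> 2 -> 1 and stopping at the
// root. toProof later emits exactly these sibling hashes, bottom-up.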
|
||||
// toProof converts the collected siblings and leaves into a fastssz.Proof structure.
|
||||
// Current behavior expects a single target leaf (single proof).
|
||||
func (pc *proofCollector) toProof() (*fastssz.Proof, error) {
|
||||
pc.Lock()
|
||||
defer pc.Unlock()
|
||||
|
||||
proof := &fastssz.Proof{}
|
||||
if len(pc.leaves) == 0 {
|
||||
return nil, errors.New("no leaves collected: add target leaves before merkleization")
|
||||
}
|
||||
|
||||
leafGindices := make([]uint64, 0, len(pc.leaves))
|
||||
for g := range pc.leaves {
|
||||
leafGindices = append(leafGindices, g)
|
||||
}
|
||||
slices.Sort(leafGindices)
|
||||
|
||||
// single proof resides in leafGindices[0]
|
||||
targetGindex := leafGindices[0]
|
||||
proofIndex, err := math.Int(targetGindex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("gindex %d overflows int: %w", targetGindex, err)
|
||||
}
|
||||
proof.Index = proofIndex
|
||||
|
||||
// store the leaf
|
||||
leaf := pc.leaves[targetGindex]
|
||||
leafBuf := make([]byte, 32)
|
||||
copy(leafBuf, leaf[:])
|
||||
proof.Leaf = leafBuf
|
||||
|
||||
// Walk from target up to root, collecting siblings.
|
||||
steps := bits.Len64(targetGindex) - 1
|
||||
proof.Hashes = make([][]byte, 0, steps)
|
||||
|
||||
for targetGindex > 1 {
|
||||
sib := targetGindex ^ 1
|
||||
h, ok := pc.siblings[sib]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing sibling hash for gindex %d", sib)
|
||||
}
|
||||
proof.Hashes = append(proof.Hashes, h[:])
|
||||
targetGindex /= 2
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
|
||||
|
||||
// collectLeaf checks if the given gindex is a required leaf for the proof,
// and if so, stores the provided leaf hash in the collector.
func (pc *proofCollector) collectLeaf(gindex uint64, leaf [32]byte) {
	if _, ok := pc.requiredLeaves[gindex]; !ok {
		return
	}
	pc.Lock()
	pc.leaves[gindex] = leaf
	pc.Unlock()
}

// collectSibling stores the hash for a sibling node identified by gindex.
// It only stores the hash if gindex was pre-registered via addTarget (present in requiredSiblings).
// Writes to the collected siblings map are protected by the collector mutex.
func (pc *proofCollector) collectSibling(gindex uint64, hash [32]byte) {
	if _, ok := pc.requiredSiblings[gindex]; !ok {
		return
	}
	pc.Lock()
	pc.siblings[gindex] = hash
	pc.Unlock()
}

// Merkleizers and proof collection methods

// merkleize recursively traverses an SSZ info and computes the Merkle root of the subtree.
//
// Proof collection:
//   - During traversal it calls collectLeaf/collectSibling with the SSZ generalized indices (gindices)
//     of visited nodes.
//   - The collector only stores hashes for gindices that were pre-registered via addTarget
//     (requiredLeaves/requiredSiblings). This makes the traversal multiproof-ready: you can register
//     multiple targets before calling merkleize.
//
// SSZ types handled: basic types, containers, lists, vectors, bitlists, and bitvectors.
//
// Parameters:
//   - info: SSZ type metadata for the current value.
//   - v: reflect.Value of the current value.
//   - currentGindex: generalized index of the current subtree root.
//
// Returns:
//   - [32]byte: Merkle root of the current subtree.
//   - error: any error encountered during traversal/merkleization.
func (pc *proofCollector) merkleize(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	if info.sszType.isBasic() {
		return pc.merkleizeBasicType(info.sszType, v, currentGindex)
	}
	switch info.sszType {
	case Container:
		return pc.merkleizeContainer(info, v, currentGindex)
	case List:
		return pc.merkleizeList(info, v, currentGindex)
	case Vector:
		return pc.merkleizeVector(info, v, currentGindex)
	case Bitlist:
		return pc.merkleizeBitlist(info, v, currentGindex)
	case Bitvector:
		return pc.merkleizeBitvector(info, v, currentGindex)
	default:
		return [32]byte{}, fmt.Errorf("unsupported SSZ type: %v", info.sszType)
	}
}
// merkleizeBasicType serializes a basic SSZ value into a 32-byte leaf chunk (little-endian, zero-padded).
//
// Proof collection:
//   - It calls collectLeaf(currentGindex, leaf) and stores the leaf if currentGindex was pre-registered via addTarget.
//
// Parameters:
//   - t: the SSZType (basic).
//   - v: the reflect.Value of the basic value.
//   - currentGindex: the generalized index (gindex) of this leaf.
//
// Returns:
//   - [32]byte: the 32-byte SSZ leaf chunk.
//   - error: if the SSZType is not a supported basic type.
func (pc *proofCollector) merkleizeBasicType(t SSZType, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	var leaf [32]byte

	// Serialize the value into a 32-byte chunk (little-endian, zero-padded)
	switch t {
	case Uint8:
		leaf[0] = uint8(v.Uint())
	case Uint16:
		binary.LittleEndian.PutUint16(leaf[:2], uint16(v.Uint()))
	case Uint32:
		binary.LittleEndian.PutUint32(leaf[:4], uint32(v.Uint()))
	case Uint64:
		binary.LittleEndian.PutUint64(leaf[:8], v.Uint())
	case Boolean:
		if v.Bool() {
			leaf[0] = 1
		}
	default:
		return [32]byte{}, fmt.Errorf("unexpected basic type: %v", t)
	}

	pc.collectLeaf(currentGindex, leaf)

	return leaf, nil
}
// merkleizeContainer computes the Merkle root of an SSZ container by:
|
||||
// 1. Merkleizing each field into a 32-byte subtree root
|
||||
// 2. Merkleizing the field roots into the container root (padding to the next power-of-2)
|
||||
//
|
||||
// Generalized indices (gindices): depth = ssz.Depth(uint64(N)) and field i has gindex = (currentGindex << depth) + uint64(i).
|
||||
// Proof collection: merkleize() computes each field root, merkleizeVectorAndCollect collects required siblings, and collectLeaf stores the container root if registered.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the container.
|
||||
// - v: reflect.Value of the container value.
|
||||
// - currentGindex: generalized index (gindex) of the container root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the container.
|
||||
// - error: any error encountered while merkleizing fields.
|
||||
func (pc *proofCollector) merkleizeContainer(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
// If the container root itself is the target, compute directly and return early.
|
||||
// This avoids full subtree merkleization when we only need the root.
|
||||
if _, ok := pc.requiredLeaves[currentGindex]; ok {
|
||||
root, err := info.HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
return root, nil
|
||||
}
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
v = dereferencePointer(v)
|
||||
|
||||
// Calculate depth: how many levels from container root to field leaves
|
||||
numFields := len(ci.order)
|
||||
depth := ssz.Depth(uint64(numFields))
|
||||
|
||||
// Step 1: Compute HTR for each subtree (field)
|
||||
fieldRoots := make([][32]byte, numFields)
|
||||
|
||||
for i, name := range ci.order {
|
||||
fieldInfo := ci.fields[name]
|
||||
fieldVal := v.FieldByName(fieldInfo.goFieldName)
|
||||
|
||||
// Field i's gindex: shift currentGindex left by depth, then OR with field index
|
||||
fieldGindex := currentGindex<<depth + uint64(i)
|
||||
|
||||
htr, err := pc.merkleize(fieldInfo.sszInfo, fieldVal, fieldGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("field %s: %w", name, err)
|
||||
}
|
||||
fieldRoots[i] = htr
|
||||
}
|
||||
|
||||
// Step 2: Merkleize the field hashes into the container root,
|
||||
// collecting sibling hashes if target is within this subtree
|
||||
root := pc.merkleizeVectorAndCollect(fieldRoots, currentGindex, uint64(depth))
|
||||
|
||||
return root, nil
|
||||
}
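
To make the gindex arithmetic above concrete (hypothetical numbers, not tied to any real container): with 5 fields, the field layer sits at depth 3 because the next power of two is 8.

	// depth = Depth(5) = 3; a container rooted at gindex 1 places field 4 at (1 << 3) + 4 = 12
	const currentGindex, depth = 1, 3
	fieldGindex := uint64(currentGindex)<<depth + 4 // == 12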

// merkleizeVectorBody computes the Merkle root of the "data" subtree for vector-like SSZ types
// (vectors and the data-part of lists/bitlists).
//
// Generalized indices (gindices): depth = ssz.Depth(limit); leafBase = subtreeRootGindex << depth; element/chunk i gindex = leafBase + uint64(i).
// Proof collection: merkleize() is called for composite elements; merkleizeVectorAndCollect collects required siblings at this layer.
// Padding: merkleizeVectorAndCollect uses trie.ZeroHashes as needed.
//
// Parameters:
// - elemInfo: SSZ type metadata for the element.
// - v: reflect.Value of the vector/list data.
// - length: number of actual elements present.
// - limit: virtual leaf capacity used for padding/Depth (fixed length for vectors, limit for lists).
// - subtreeRootGindex: gindex of the data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the data subtree.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVectorBody(elemInfo *SszInfo, v reflect.Value, length int, limit uint64, subtreeRootGindex uint64) ([32]byte, error) {
	depth := uint64(ssz.Depth(limit))

	var chunks [][32]byte
	if elemInfo.sszType.isBasic() {
		// Serialize basic elements and pack into 32-byte chunks using ssz.PackByChunk.
		elemSize, err := math.Int(itemLength(elemInfo))
		if err != nil {
			return [32]byte{}, fmt.Errorf("element size %d overflows int: %w", itemLength(elemInfo), err)
		}
		serialized := make([][]byte, length)
		// Single contiguous allocation for all element data
		allData := make([]byte, length*elemSize)
		for i := range length {
			buf := allData[i*elemSize : (i+1)*elemSize]
			elem := v.Index(i)
			if elemInfo.sszType == Boolean && elem.Bool() {
				buf[0] = 1
			} else {
				bytesutil.PutLittleEndian(buf, elem.Uint(), elemSize)
			}
			serialized[i] = buf
		}
		chunks, err = ssz.PackByChunk(serialized)
		if err != nil {
			return [32]byte{}, err
		}
	} else {
		// Composite elements: compute each element root (no padding here; merkleizeVectorAndCollect pads).
		chunks = make([][32]byte, length)

		// Fall back to per-element merkleization with proper gindices for proof collection.
		// Parallel execution
		workerCount := min(runtime.GOMAXPROCS(0), length)

		jobs := make(chan int, workerCount*16)
		errCh := make(chan error, 1) // only need the first error
		stopCh := make(chan struct{})
		var stopOnce sync.Once
		var wg sync.WaitGroup

		worker := func() {
			defer wg.Done()
			for idx := range jobs {
				select {
				case <-stopCh:
					return
				default:
				}

				elemGindex := subtreeRootGindex<<depth + uint64(idx)
				htr, err := pc.merkleize(elemInfo, v.Index(idx), elemGindex)
				if err != nil {
					stopOnce.Do(func() { close(stopCh) })
					select {
					case errCh <- fmt.Errorf("index %d: %w", idx, err):
					default:
					}
					return
				}
				chunks[idx] = htr
			}
		}

		wg.Add(workerCount)
		for range workerCount {
			go worker()
		}

		// Enqueue jobs; stop early if any worker reports an error.
	enqueue:
		for i := range length {
			select {
			case <-stopCh:
				break enqueue
			case jobs <- i:
			}
		}
		close(jobs)

		wg.Wait()

		select {
		case err := <-errCh:
			return [32]byte{}, err
		default:
		}
	}

	root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, depth)
	return root, nil
}
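
For a feel of the fan-out above (an illustrative sketch with made-up numbers): each composite element i is merkleized in parallel under the gindex subtreeRootGindex<<depth + i.

	// With limit = 16 (depth 4) and a data subtree rooted at gindex 2,
	// element 5 is merkleized under gindex 2<<4 + 5 = 37.
	subtreeRootGindex, depth := uint64(2), uint64(4)
	elemGindex := subtreeRootGindex<<depth + 5 // 37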

// merkleizeVector computes the Merkle root of an SSZ vector (fixed-length).
//
// Generalized indices (gindices): currentGindex is the gindex of the vector root; element/chunk gindices are derived
// inside merkleizeVectorBody using leafBase = currentGindex << ssz.Depth(leaves).
//
// Proof collection: merkleizeVectorBody performs element/chunk merkleization and collects required siblings at the
// vector layer; collectLeaf stores the vector root if currentGindex was registered via addTarget.
//
// Parameters:
// - info: SSZ type metadata for the vector.
// - v: reflect.Value of the vector value.
// - currentGindex: generalized index (gindex) of the vector root.
//
// Returns:
// - [32]byte: Merkle root of the vector.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	vi, err := info.VectorInfo()
	if err != nil {
		return [32]byte{}, err
	}

	length, err := math.Int(vi.Length())
	if err != nil {
		return [32]byte{}, fmt.Errorf("vector length %d overflows int: %w", vi.Length(), err)
	}
	elemInfo := vi.element

	// Determine the virtual leaf capacity for the vector.
	leaves, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	root, err := pc.merkleizeVectorBody(elemInfo, v, length, leaves, currentGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// Store the vector root if it is itself a registered target.
	pc.collectLeaf(currentGindex, root)

	return root, nil
}

// merkleizeList computes the Merkle root of an SSZ list by merkleizing its data subtree and mixing in the length.
//
// Generalized indices (gindices): dataRoot is the left child of the list root (dataRootGindex = currentGindex*2); the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeVectorBody computes the data root (collecting required siblings in the data subtree), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the list root if registered.
//
// Parameters:
// - info: SSZ type metadata for the list.
// - v: reflect.Value of the list value.
// - currentGindex: generalized index (gindex) of the list root.
//
// Returns:
// - [32]byte: Merkle root of the list.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeList(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	li, err := info.ListInfo()
	if err != nil {
		return [32]byte{}, err
	}

	length := v.Len()
	elemInfo := li.element

	chunks := make([][32]byte, 2)
	// Compute the length hash (little-endian uint256)
	binary.LittleEndian.PutUint64(chunks[1][:8], uint64(length))

	// Data subtree root is the left child of the list root.
	dataRootGindex := currentGindex * 2

	// Compute virtual leaf capacity for the data subtree.
	leaves, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	chunks[0], err = pc.merkleizeVectorBody(elemInfo, v, length, leaves, dataRootGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// Handle the length mixin level (and proof bookkeeping at this level).
	// Compute the final list root: hash(dataRoot || lengthHash)
	root := pc.mixinLengthAndCollect(currentGindex, chunks)

	// Store the list root if it is itself a registered target.
	pc.collectLeaf(currentGindex, root)

	return root, nil
}
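
Conceptually the list root produced above is the standard SSZ length mix-in; a minimal standalone sketch using the stdlib hash (the real code goes through ssz.MixInLength):

	// listRoot = hash(dataRoot || littleEndian(length) zero-padded to 32 bytes)
	var dataRoot, lengthChunk [32]byte
	binary.LittleEndian.PutUint64(lengthChunk[:8], uint64(2)) // list length 2
	listRoot := sha256.Sum256(append(dataRoot[:], lengthChunk[:]...))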

// merkleizeBitvectorBody computes the Merkle root of a bitvector-like byte sequence by packing it into 32-byte chunks
// and merkleizing those chunks as a fixed-capacity vector (padding with trie.ZeroHashes as needed).
//
// Generalized indices (gindices): depth = ssz.Depth(chunkLimit); leafBase = subtreeRootGindex << depth; chunk i uses gindex = leafBase + uint64(i).
// Proof collection: merkleizeVectorAndCollect collects required sibling hashes at the chunk-merkleization layer.
//
// Parameters:
// - data: raw byte sequence representing the bitvector payload.
// - chunkLimit: fixed/limit number of 32-byte chunks (used for padding/Depth).
// - subtreeRootGindex: gindex of the bitvector data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector data subtree.
// - error: any error encountered while packing data into chunks.
func (pc *proofCollector) merkleizeBitvectorBody(data []byte, chunkLimit uint64, subtreeRootGindex uint64) ([32]byte, error) {
	depth := ssz.Depth(chunkLimit)
	chunks, err := ssz.PackByChunk([][]byte{data})
	if err != nil {
		return [32]byte{}, err
	}
	root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, uint64(depth))
	return root, nil
}

// merkleizeBitvector computes the Merkle root of a fixed-length SSZ bitvector and collects proof nodes for targets.
//
// Parameters:
// - info: SSZ type metadata for the bitvector.
// - v: reflect.Value of the bitvector value.
// - currentGindex: generalized index (gindex) of the bitvector root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector.
// - error: any error encountered during packing or merkleization.
func (pc *proofCollector) merkleizeBitvector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	bitvectorBytes := v.Bytes()
	if len(bitvectorBytes) == 0 {
		return [32]byte{}, fmt.Errorf("bitvector field is uninitialized (nil or empty slice)")
	}

	// Compute virtual leaf capacity for the bitvector.
	numChunks, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	root, err := pc.merkleizeBitvectorBody(bitvectorBytes, numChunks, currentGindex)
	if err != nil {
		return [32]byte{}, err
	}

	pc.collectLeaf(currentGindex, root)

	return root, nil
}
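
The chunk count here is plain packing arithmetic; a quick illustrative calculation (assumed numbers, not the package API):

	// A Bitvector512 is 512/8 = 64 bytes, i.e. ceil(64/32) = 2 chunks,
	// giving a depth-1 data subtree.
	bits := 512
	chunkCount := (bits/8 + 31) / 32 // 2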

// merkleizeBitlist computes the Merkle root of an SSZ bitlist by merkleizing its data chunks and mixing in the bit length.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeBitvectorBody computes the data root (collecting required siblings under dataRootGindex), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the bitlist root if registered.
//
// Parameters:
// - info: SSZ type metadata for the bitlist.
// - v: reflect.Value of the bitlist value.
// - currentGindex: generalized index (gindex) of the bitlist root.
//
// Returns:
// - [32]byte: Merkle root of the bitlist.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeBitlist(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	bi, err := info.BitlistInfo()
	if err != nil {
		return [32]byte{}, err
	}

	bitlistBytes := v.Bytes()

	// Use go-bitfield to get bytes with termination bit cleared
	bl := bitfield.Bitlist(bitlistBytes)
	data := bl.BytesNoTrim()

	// Get the bit length from bitlistInfo
	bitLength := bi.Length()

	// Get the chunk limit from getChunkCount
	limitChunks, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	chunks := make([][32]byte, 2)
	// Compute the length hash (little-endian uint256)
	binary.LittleEndian.PutUint64(chunks[1][:8], uint64(bitLength))

	dataRootGindex := currentGindex * 2
	chunks[0], err = pc.merkleizeBitvectorBody(data, limitChunks, dataRootGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// Handle the length mixin level (and proof bookkeeping at this level).
	root := pc.mixinLengthAndCollect(currentGindex, chunks)

	pc.collectLeaf(currentGindex, root)

	return root, nil
}
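
A small sketch of the bitlist wire format handled above (hypothetical 16-bit list; go-bitfield is the same library the code uses): the termination bit marks the length and must be cleared before chunking.

	bl := bitfield.NewBitlist(16) // 3 bytes on the wire: 16 data bits + 1 length bit
	bl.SetBitAt(3, true)
	data := bl.BytesNoTrim() // data bytes with the termination bit cleared
	bits := bl.Len()         // 16, the value mixed in as the length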

// merkleizeVectorAndCollect merkleizes a slice of 32-byte leaf nodes into a subtree root, padding to a virtual size of 2^depth.
//
// Generalized indices (gindices): at layer i (0-based), the level base is levelBase = subtreeGeneralizedIndex << (depth-i), and the node at position idx has gindex = levelBase + idx.
// Proof collection: for each layer it calls collectSibling(nodeGindex, nodeHash) and stores only those gindices registered via addTarget.
//
// Parameters:
// - elements: leaf-level hashes (may be shorter than 2^depth; padding is applied with trie.ZeroHashes).
// - subtreeGeneralizedIndex: gindex of the subtree root.
// - depth: number of merkleization layers from subtree root to leaves.
//
// Returns:
// - [32]byte: Merkle root of the subtree.
func (pc *proofCollector) merkleizeVectorAndCollect(elements [][32]byte, subtreeGeneralizedIndex uint64, depth uint64) [32]byte {
	// Empty input: the subtree is all padding, so return the precomputed zero hash at this depth.
	if len(elements) == 0 {
		return trie.ZeroHashes[depth]
	}
	for i := range depth {
		layerLen := len(elements)
		oddNodeLength := layerLen%2 == 1
		if oddNodeLength {
			zerohash := trie.ZeroHashes[i]
			elements = append(elements, zerohash)
		}

		levelBaseGindex := subtreeGeneralizedIndex << (depth - i)
		for idx := range elements {
			gindex := levelBaseGindex + uint64(idx)
			pc.collectSibling(gindex, elements[idx])
			pc.collectLeaf(gindex, elements[idx])
		}

		elements = htr.VectorizedSha256(elements)
	}
	return elements[0]
}
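
For intuition on the per-layer gindices (illustrative values only): with depth 2 and a subtree rooted at gindex 3, the leaf layer starts at 3<<2 and the middle layer at 3<<1.

	subtreeGindex, depth := uint64(3), uint64(2)
	leafBase := subtreeGindex << depth      // 12: leaf gindices 12, 13, 14, 15
	midBase := subtreeGindex << (depth - 1) // 6: mid-layer gindices 6, 7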

// mixinLengthAndCollect computes the final mix-in root for list/bitlist values:
//
//	root = hash(dataRoot, lengthHash)
//
// where chunks[0] is dataRoot and chunks[1] is the 32-byte length hash.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and lengthHash is the right child (lengthHashGindex = currentGindex*2+1).
// Proof collection: it calls collectSibling/collectLeaf for both child gindices; the collector stores them only if they were registered via addTarget.
//
// Parameters:
// - currentGindex: gindex of the parent node (list/bitlist root).
// - chunks: two 32-byte nodes: [dataRoot, lengthHash].
//
// Returns:
// - [32]byte: the mixed-in Merkle root.
func (pc *proofCollector) mixinLengthAndCollect(currentGindex uint64, chunks [][32]byte) [32]byte {
	dataRoot, lengthHash := chunks[0], chunks[1]
	dataRootGindex, lengthHashGindex := currentGindex*2, currentGindex*2+1

	pc.collectSibling(dataRootGindex, dataRoot)
	pc.collectSibling(lengthHashGindex, lengthHash)

	pc.collectLeaf(dataRootGindex, dataRoot)
	pc.collectLeaf(lengthHashGindex, lengthHash)

	return ssz.MixInLength(dataRoot, lengthHash[:])
}

531	encoding/ssz/query/proof_collector_test.go	Normal file

@@ -0,0 +1,531 @@
package query

import (
	"crypto/sha256"
	"encoding/binary"
	"reflect"
	"slices"
	"testing"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	sszquerypb "github.com/OffchainLabs/prysm/v7/proto/ssz_query/testing"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProofCollector_New(t *testing.T) {
	pc := newProofCollector()

	require.NotNil(t, pc)
	require.Equal(t, 0, len(pc.requiredSiblings))
	require.Equal(t, 0, len(pc.requiredLeaves))
	require.Equal(t, 0, len(pc.siblings))
	require.Equal(t, 0, len(pc.leaves))
}

func TestProofCollector_Reset(t *testing.T) {
	pc := newProofCollector()
	pc.requiredSiblings[3] = struct{}{}
	pc.requiredLeaves[5] = struct{}{}
	pc.siblings[3] = [32]byte{1}
	pc.leaves[5] = [32]byte{2}

	pc.reset()

	require.Equal(t, 0, len(pc.requiredSiblings))
	require.Equal(t, 0, len(pc.requiredLeaves))
	require.Equal(t, 0, len(pc.siblings))
	require.Equal(t, 0, len(pc.leaves))
}

func TestProofCollector_AddTarget(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(5)

	_, hasLeaf := pc.requiredLeaves[5]
	_, hasSibling4 := pc.requiredSiblings[4]
	_, hasSibling3 := pc.requiredSiblings[3]
	_, hasSibling1 := pc.requiredSiblings[1] // GI 1 is the root

	require.Equal(t, true, hasLeaf)
	require.Equal(t, true, hasSibling4)
	require.Equal(t, true, hasSibling3)
	require.Equal(t, false, hasSibling1)
}
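
The expectations above follow from the Merkle path of gindex 5: its sibling is 4, its parent 2 has sibling 3, and the root 1 needs no sibling. A generic sketch of that walk (standalone illustration, not the collector's code):

	// Siblings along the path from gindex g up to (but excluding) the root:
	for g := uint64(5); g > 1; g >>= 1 {
		sibling := g ^ 1 // 5 -> 4, then 2 -> 3
		_ = sibling
	}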

func TestProofCollector_ToProof(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(5)

	leaf := [32]byte{9}
	sibling4 := [32]byte{4}
	sibling3 := [32]byte{3}

	pc.collectLeaf(5, leaf)
	pc.collectSibling(4, sibling4)
	pc.collectSibling(3, sibling3)

	proof, err := pc.toProof()
	require.NoError(t, err)

	require.Equal(t, 5, proof.Index)
	require.DeepEqual(t, leaf[:], proof.Leaf)
	require.Equal(t, 2, len(proof.Hashes))
	require.DeepEqual(t, sibling4[:], proof.Hashes[0])
	require.DeepEqual(t, sibling3[:], proof.Hashes[1])
}

func TestProofCollector_ToProof_NoLeaves(t *testing.T) {
	pc := newProofCollector()
	_, err := pc.toProof()
	require.NotNil(t, err)
}

func TestProofCollector_CollectLeaf(t *testing.T) {
	pc := newProofCollector()
	leaf := [32]byte{7}

	pc.collectLeaf(10, leaf)
	require.Equal(t, 0, len(pc.leaves))

	pc.addTarget(10)
	pc.collectLeaf(10, leaf)
	stored, ok := pc.leaves[10]
	require.Equal(t, true, ok)
	require.Equal(t, leaf, stored)
}

func TestProofCollector_CollectSibling(t *testing.T) {
	pc := newProofCollector()
	hash := [32]byte{5}

	pc.collectSibling(4, hash)
	require.Equal(t, 0, len(pc.siblings))

	pc.addTarget(5)
	pc.collectSibling(4, hash)
	stored, ok := pc.siblings[4]
	require.Equal(t, true, ok)
	require.Equal(t, hash, stored)
}

func TestProofCollector_Merkleize_BasicTypes(t *testing.T) {
	testCases := []struct {
		name     string
		sszType  SSZType
		value    any
		expected [32]byte
	}{
		{
			name:    "uint8",
			sszType: Uint8,
			value:   uint8(0x11),
			expected: func() [32]byte {
				var leaf [32]byte
				leaf[0] = 0x11
				return leaf
			}(),
		},
		{
			name:    "uint16",
			sszType: Uint16,
			value:   uint16(0x2211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint16(leaf[:2], 0x2211)
				return leaf
			}(),
		},
		{
			name:    "uint32",
			sszType: Uint32,
			value:   uint32(0x44332211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint32(leaf[:4], 0x44332211)
				return leaf
			}(),
		},
		{
			name:    "uint64",
			sszType: Uint64,
			value:   uint64(0x8877665544332211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint64(leaf[:8], 0x8877665544332211)
				return leaf
			}(),
		},
		{
			name:    "bool",
			sszType: Boolean,
			value:   true,
			expected: func() [32]byte {
				var leaf [32]byte
				leaf[0] = 1
				return leaf
			}(),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			pc := newProofCollector()
			gindex := uint64(3)
			pc.addTarget(gindex)

			leaf, err := pc.merkleizeBasicType(tc.sszType, reflect.ValueOf(tc.value), gindex)
			require.NoError(t, err)
			require.Equal(t, tc.expected, leaf)

			stored, ok := pc.leaves[gindex]
			require.Equal(t, true, ok)
			require.Equal(t, tc.expected, stored)
		})
	}
}

func TestProofCollector_Merkleize_Container(t *testing.T) {
	container := makeFixedTestContainer()

	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	pc := newProofCollector()
	pc.addTarget(1)

	root, err := pc.merkleize(info, reflect.ValueOf(container), 1)
	require.NoError(t, err)

	expected, err := container.HashTreeRoot()
	require.NoError(t, err)
	require.Equal(t, expected, root)

	stored, ok := pc.leaves[1]
	require.Equal(t, true, ok)
	require.Equal(t, expected, stored)
}

func TestProofCollector_Merkleize_Vector(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["vector_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeVector(field.sszInfo, reflect.ValueOf(container.VectorField), 1)
	require.NoError(t, err)

	serialized := make([][]byte, len(container.VectorField))
	for i, v := range container.VectorField {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, v)
		serialized[i] = buf
	}
	chunks, err := ssz.PackByChunk(serialized)
	require.NoError(t, err)
	limit, err := getChunkCount(field.sszInfo)
	require.NoError(t, err)
	expected := ssz.MerkleizeVector(chunks, limit)

	require.Equal(t, expected, root)
}

func TestProofCollector_Merkleize_List(t *testing.T) {
	list := []*sszquerypb.FixedNestedContainer{
		makeFixedNestedContainer(1),
		makeFixedNestedContainer(2),
	}
	container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["field_list_container"]

	pc := newProofCollector()
	root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
	require.NoError(t, err)

	listInfo, err := field.sszInfo.ListInfo()
	require.NoError(t, err)
	expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
	require.NoError(t, err)

	require.Equal(t, expected, root)
}

func TestProofCollector_Merkleize_Bitvector(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["bitvector64_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeBitvector(field.sszInfo, reflect.ValueOf(container.Bitvector64Field), 1)
	require.NoError(t, err)

	expected, err := ssz.MerkleizeByteSliceSSZ([]byte(container.Bitvector64Field))
	require.NoError(t, err)
	require.Equal(t, expected, root)
}

func TestProofCollector_Merkleize_Bitlist(t *testing.T) {
	bitlist := bitfield.NewBitlist(16)
	bitlist.SetBitAt(3, true)
	bitlist.SetBitAt(8, true)

	container := makeVariableTestContainer(nil, bitlist)
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["bitlist_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeBitlist(field.sszInfo, reflect.ValueOf(container.BitlistField), 1)
	require.NoError(t, err)

	bitlistInfo, err := field.sszInfo.BitlistInfo()
	require.NoError(t, err)
	expected, err := ssz.BitlistRoot(bitfield.Bitlist(bitlist), bitlistInfo.Limit())
	require.NoError(t, err)
	require.Equal(t, expected, root)
}

func TestProofCollector_MerkleizeVectorBody_Basic(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["vector_field"]
	vectorInfo, err := field.sszInfo.VectorInfo()
	require.NoError(t, err)
	length := len(container.VectorField)
	limit, err := getChunkCount(field.sszInfo)
	require.NoError(t, err)

	pc := newProofCollector()
	root, err := pc.merkleizeVectorBody(vectorInfo.element, reflect.ValueOf(container.VectorField), length, limit, 2)
	require.NoError(t, err)

	serialized := make([][]byte, len(container.VectorField))
	for i, v := range container.VectorField {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, v)
		serialized[i] = buf
	}
	chunks, err := ssz.PackByChunk(serialized)
	require.NoError(t, err)
	expected := ssz.MerkleizeVector(chunks, limit)

	require.Equal(t, expected, root)
}

func TestProofCollector_MerkleizeVectorAndCollect(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(6)

	elements := [][32]byte{{1}, {2}}
	expected := ssz.MerkleizeVector(slices.Clone(elements), 2)
	root := pc.merkleizeVectorAndCollect(elements, 3, 1)

	storedLeaf, hasLeaf := pc.leaves[6]
	storedSibling, hasSibling := pc.siblings[7]

	require.Equal(t, true, hasLeaf)
	require.Equal(t, true, hasSibling)
	require.Equal(t, elements[0], storedLeaf)
	require.Equal(t, elements[1], storedSibling)

	require.Equal(t, expected, root)
}
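
At depth 1 the expected root in the test above is simply the hash of the two leaves; a standalone sketch of that check using the stdlib hash (illustrative, the test goes through ssz.MerkleizeVector):

	left, right := [32]byte{1}, [32]byte{2}
	root := sha256.Sum256(append(left[:], right[:]...)) // subtree root at depth 1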

func TestProofCollector_MixinLengthAndCollect(t *testing.T) {
	list := []*sszquerypb.FixedNestedContainer{
		makeFixedNestedContainer(1),
		makeFixedNestedContainer(2),
	}
	container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["field_list_container"]

	// Target gindex 2 (data root) - sibling at gindex 3 (length hash) should be collected
	pc := newProofCollector()
	pc.addTarget(2)
	root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
	require.NoError(t, err)

	listInfo, err := field.sszInfo.ListInfo()
	require.NoError(t, err)
	expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
	require.NoError(t, err)
	require.Equal(t, expected, root)

	// Verify data root is collected as leaf at gindex 2
	storedLeaf, hasLeaf := pc.leaves[2]
	require.Equal(t, true, hasLeaf)

	// Verify length hash is collected as sibling at gindex 3
	storedSibling, hasSibling := pc.siblings[3]
	require.Equal(t, true, hasSibling)

	// Verify the root is hash(dataRoot || lengthHash)
	expectedBuf := append(storedLeaf[:], storedSibling[:]...)
	expectedRoot := sha256.Sum256(expectedBuf)
	require.Equal(t, expectedRoot, root)
}

func BenchmarkOptimizedValidatorRoots(b *testing.B) {
	validators := make([]*ethpb.Validator, 1000)
	for i := range validators {
		validators[i] = makeTestValidator(i)
	}

	b.ResetTimer()
	for b.Loop() {
		_, err := stateutil.OptimizedValidatorRoots(validators)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkProofCollectorMerkleize(b *testing.B) {
	validators := make([]*ethpb.Validator, 1000)
	for i := range validators {
		validators[i] = makeTestValidator(i)
	}

	info, err := AnalyzeObject(validators[0])
	require.NoError(b, err)

	b.ResetTimer()
	for b.Loop() {
		for _, val := range validators {
			pc := newProofCollector()
			v := reflect.ValueOf(val)
			_, err := pc.merkleize(info, v, 1)
			if err != nil {
				b.Fatal(err)
			}
		}
	}
}

func makeTestValidator(i int) *ethpb.Validator {
	pubkey := make([]byte, 48)
	for j := range pubkey {
		pubkey[j] = byte(i + j)
	}

	withdrawalCredentials := make([]byte, 32)
	for j := range withdrawalCredentials {
		withdrawalCredentials[j] = byte(255 - ((i + j) % 256))
	}

	return &ethpb.Validator{
		PublicKey:                  pubkey,
		WithdrawalCredentials:      withdrawalCredentials,
		EffectiveBalance:           uint64(32000000000 + i),
		Slashed:                    i%2 == 0,
		ActivationEligibilityEpoch: primitives.Epoch(i),
		ActivationEpoch:            primitives.Epoch(i + 1),
		ExitEpoch:                  primitives.Epoch(i + 2),
		WithdrawableEpoch:          primitives.Epoch(i + 3),
	}
}

func makeFixedNestedContainer(value uint64) *sszquerypb.FixedNestedContainer {
	value2 := make([]byte, 32)
	for i := range value2 {
		value2[i] = byte(i)
	}
	return &sszquerypb.FixedNestedContainer{
		Value1: value,
		Value2: value2,
	}
}

func makeFixedTestContainer() *sszquerypb.FixedTestContainer {
	fieldBytes32 := make([]byte, 32)
	for i := range fieldBytes32 {
		fieldBytes32[i] = byte(i)
	}

	vectorField := make([]uint64, 24)
	for i := range vectorField {
		vectorField[i] = uint64(i)
	}

	rows := make([][]byte, 5)
	for i := range rows {
		row := make([]byte, 32)
		for j := range row {
			row[j] = byte(i) + byte(j)
		}
		rows[i] = row
	}

	bitvector64 := bitfield.NewBitvector64()
	bitvector64.SetBitAt(1, true)
	bitvector512 := bitfield.NewBitvector512()
	bitvector512.SetBitAt(10, true)

	trailing := make([]byte, 56)
	for i := range trailing {
		trailing[i] = byte(i)
	}

	return &sszquerypb.FixedTestContainer{
		FieldUint32:            1,
		FieldUint64:            2,
		FieldBool:              true,
		FieldBytes32:           fieldBytes32,
		Nested:                 makeFixedNestedContainer(3),
		VectorField:            vectorField,
		TwoDimensionBytesField: rows,
		Bitvector64Field:       bitvector64,
		Bitvector512Field:      bitvector512,
		TrailingField:          trailing,
	}
}

func makeVariableTestContainer(list []*sszquerypb.FixedNestedContainer, bitlist bitfield.Bitlist) *sszquerypb.VariableTestContainer {
	leading := make([]byte, 32)
	for i := range leading {
		leading[i] = byte(i)
	}
	trailing := make([]byte, 56)
	for i := range trailing {
		trailing[i] = byte(255 - i)
	}

	if bitlist == nil {
		bitlist = bitfield.NewBitlist(0)
	}

	return &sszquerypb.VariableTestContainer{
		LeadingField:       leading,
		FieldListContainer: list,
		BitlistField:       bitlist,
		TrailingField:      trailing,
	}
}

@@ -389,6 +389,7 @@ func TestHashTreeRoot(t *testing.T) {
			require.NoError(t, err, "HashTreeRoot should not return an error")
			expectedHashTreeRoot, err := tt.obj.HashTreeRoot()
			require.NoError(t, err, "HashTreeRoot on original object should not return an error")
+			// Verify the Merkle tree root matches with the SSZ generated HashTreeRoot
			require.Equal(t, expectedHashTreeRoot, hashTreeRoot, "HashTreeRoot from sszInfo should match original object's HashTreeRoot")
		})
	}
@@ -26,21 +26,21 @@ func TestLifecycle(t *testing.T) {
	port := 1000 + rand.Intn(1000)
	prometheusService := NewService(t.Context(), fmt.Sprintf(":%d", port), nil)
	prometheusService.Start()
	// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
	deadline := time.Now().Add(3 * time.Second)
	for {
		if time.Now().After(deadline) {
			t.Fatalf("metrics endpoint not ready within timeout")
		}
		resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
		if err == nil {
			_ = resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				break
			}
		}
		time.Sleep(50 * time.Millisecond)
	}

	// Query the service to ensure it really started.
	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
@@ -49,18 +49,18 @@ func TestLifecycle(t *testing.T) {

	err = prometheusService.Stop()
	require.NoError(t, err)
	// Actively wait until the service stops responding on /metrics
	deadline = time.Now().Add(3 * time.Second)
	for {
		if time.Now().After(deadline) {
			t.Fatalf("metrics endpoint still reachable after timeout")
		}
		_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
		if err != nil {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}

	// Query the service to ensure it really stopped.
	_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))

@@ -144,15 +144,17 @@ func copySignedExecutionPayloadBid(header *SignedExecutionPayloadBid) *SignedExe
	}
	if header.Message != nil {
		copied.Message = &ExecutionPayloadBid{
-			ParentBlockHash:        bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
-			ParentBlockRoot:        bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
-			BlockHash:              bytesutil.SafeCopyBytes(header.Message.BlockHash),
-			FeeRecipient:           bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
-			GasLimit:               header.Message.GasLimit,
-			BuilderIndex:           header.Message.BuilderIndex,
-			Slot:                   header.Message.Slot,
-			Value:                  header.Message.Value,
-			BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.Message.BlobKzgCommitmentsRoot),
+			ParentBlockHash:    bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
+			ParentBlockRoot:    bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
+			BlockHash:          bytesutil.SafeCopyBytes(header.Message.BlockHash),
+			PrevRandao:         bytesutil.SafeCopyBytes(header.Message.PrevRandao),
+			FeeRecipient:       bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
+			GasLimit:           header.Message.GasLimit,
+			BuilderIndex:       header.Message.BuilderIndex,
+			Slot:               header.Message.Slot,
+			Value:              header.Message.Value,
+			ExecutionPayment:   header.Message.ExecutionPayment,
+			BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.Message.BlobKzgCommitments),
		}
	}
	return copied
@@ -1215,15 +1215,15 @@ func genSignedExecutionPayloadBidGloas() *v1alpha1.SignedExecutionPayloadBid {

func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
	return &v1alpha1.ExecutionPayloadBid{
-		ParentBlockHash:        bytes(32),
-		ParentBlockRoot:        bytes(32),
-		BlockHash:              bytes(32),
-		FeeRecipient:           bytes(20),
-		GasLimit:               rand.Uint64(),
-		BuilderIndex:           primitives.BuilderIndex(rand.Uint64()),
-		Slot:                   primitives.Slot(rand.Uint64()),
-		Value:                  primitives.Gwei(rand.Uint64()),
-		BlobKzgCommitmentsRoot: bytes(32),
+		ParentBlockHash:    bytes(32),
+		ParentBlockRoot:    bytes(32),
+		BlockHash:          bytes(32),
+		FeeRecipient:       bytes(20),
+		GasLimit:           rand.Uint64(),
+		BuilderIndex:       primitives.BuilderIndex(rand.Uint64()),
+		Slot:               primitives.Slot(rand.Uint64()),
+		Value:              primitives.Gwei(rand.Uint64()),
+		BlobKzgCommitments: [][]byte{bytes(48)},
	}
}
@@ -10,17 +10,17 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
		return nil
	}
	return &ExecutionPayloadBid{
-		ParentBlockHash:        bytesutil.SafeCopyBytes(header.ParentBlockHash),
-		ParentBlockRoot:        bytesutil.SafeCopyBytes(header.ParentBlockRoot),
-		BlockHash:              bytesutil.SafeCopyBytes(header.BlockHash),
-		PrevRandao:             bytesutil.SafeCopyBytes(header.PrevRandao),
-		FeeRecipient:           bytesutil.SafeCopyBytes(header.FeeRecipient),
-		GasLimit:               header.GasLimit,
-		BuilderIndex:           header.BuilderIndex,
-		Slot:                   header.Slot,
-		Value:                  header.Value,
-		ExecutionPayment:       header.ExecutionPayment,
-		BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
+		ParentBlockHash:    bytesutil.SafeCopyBytes(header.ParentBlockHash),
+		ParentBlockRoot:    bytesutil.SafeCopyBytes(header.ParentBlockRoot),
+		BlockHash:          bytesutil.SafeCopyBytes(header.BlockHash),
+		PrevRandao:         bytesutil.SafeCopyBytes(header.PrevRandao),
+		FeeRecipient:       bytesutil.SafeCopyBytes(header.FeeRecipient),
+		GasLimit:           header.GasLimit,
+		BuilderIndex:       header.BuilderIndex,
+		Slot:               header.Slot,
+		Value:              header.Value,
+		ExecutionPayment:   header.ExecutionPayment,
+		BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.BlobKzgCommitments),
	}
}

1089	proto/prysm/v1alpha1/gloas.pb.go	generated

File diff suppressed because it is too large
@@ -33,7 +33,7 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
// slot: Slot
// value: Gwei
// execution_payment: Gwei
-// blob_kzg_commitments_root: Root
+// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
message ExecutionPayloadBid {
  bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
@@ -56,7 +56,10 @@ message ExecutionPayloadBid {
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
  ];
-  bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
+  repeated bytes blob_kzg_commitments = 11 [
+    (ethereum.eth.ext.ssz_size) = "?,48",
+    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
+  ];
}
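
The `"?,48"` size annotation above denotes a variable-length list whose every element is exactly 48 bytes (one KZG commitment). A minimal validation sketch in Go (`bid` is a hypothetical variable, not the generated code):

	for i, c := range bid.BlobKzgCommitments {
		if len(c) != 48 {
			return fmt.Errorf("commitment %d: got %d bytes, want 48", i, len(c))
		}
	}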

// SignedExecutionPayloadBid wraps an execution payload bid with a signature.
@@ -366,7 +369,6 @@ message BuilderPendingWithdrawal {
// class DataColumnSidecar(Container):
//     index: ColumnIndex
//     column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
-//     kzg_commitents: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
//     kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
//     slot: Slot
//     beacon_block_root: Root
@@ -376,10 +378,6 @@ message DataColumnSidecarGloas {
    (ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size",
    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
  ];
-  repeated bytes kzg_commitments = 3 [
-    (ethereum.eth.ext.ssz_size) = "?,48",
-    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
-  ];
  repeated bytes kzg_proofs = 4 [
    (ethereum.eth.ext.ssz_size) = "?,48",
    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
@@ -402,7 +400,6 @@ message DataColumnSidecarGloas {
//     builder_index: BuilderIndex
//     beacon_block_root: Root
//     slot: Slot
-//     blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
//     state_root: Root
message ExecutionPayloadEnvelope {
  ethereum.engine.v1.ExecutionPayloadDeneb payload = 1;
@@ -415,11 +412,7 @@ message ExecutionPayloadEnvelope {
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
  ];
-  repeated bytes blob_kzg_commitments = 6 [
-    (ethereum.eth.ext.ssz_size) = "?,48",
-    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
-  ];
-  bytes state_root = 7 [ (ethereum.eth.ext.ssz_size) = "32" ];
+  bytes state_root = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
}

// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a signature.

@@ -15,6 +15,7 @@ func (e *ExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadBid object to a target array
func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
+	offset := int(192)

	// Field (0) 'ParentBlockHash'
	if size := len(e.ParentBlockHash); size != 32 {
@@ -66,12 +67,22 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	// Field (9) 'ExecutionPayment'
	dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))

-	// Field (10) 'BlobKzgCommitmentsRoot'
-	if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
-		err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
+	// Offset (10) 'BlobKzgCommitments'
+	dst = ssz.WriteOffset(dst, offset)
+	offset += len(e.BlobKzgCommitments) * 48
+
+	// Field (10) 'BlobKzgCommitments'
+	if size := len(e.BlobKzgCommitments); size > 4096 {
+		err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
		return
	}
-	dst = append(dst, e.BlobKzgCommitmentsRoot...)
+	for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
+		if size := len(e.BlobKzgCommitments[ii]); size != 48 {
+			err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
+			return
+		}
+		dst = append(dst, e.BlobKzgCommitments[ii]...)
+	}

	return
}
@@ -80,10 +91,13 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
-	if size != 220 {
+	if size < 192 {
		return ssz.ErrSize
	}

+	tail := buf
+	var o10 uint64
+
	// Field (0) 'ParentBlockHash'
	if cap(e.ParentBlockHash) == 0 {
		e.ParentBlockHash = make([]byte, 0, len(buf[0:32]))
@@ -129,18 +143,40 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
	// Field (9) 'ExecutionPayment'
	e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))

-	// Field (10) 'BlobKzgCommitmentsRoot'
-	if cap(e.BlobKzgCommitmentsRoot) == 0 {
-		e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
+	// Offset (10) 'BlobKzgCommitments'
+	if o10 = ssz.ReadOffset(buf[188:192]); o10 > size {
+		return ssz.ErrOffset
	}
-	e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)

+	if o10 != 192 {
+		return ssz.ErrInvalidVariableOffset
+	}
+
+	// Field (10) 'BlobKzgCommitments'
+	{
+		buf = tail[o10:]
+		num, err := ssz.DivideInt2(len(buf), 48, 4096)
+		if err != nil {
+			return err
+		}
+		e.BlobKzgCommitments = make([][]byte, num)
+		for ii := 0; ii < num; ii++ {
+			if cap(e.BlobKzgCommitments[ii]) == 0 {
+				e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
+			}
+			e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
+		}
+	}
	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
-	size = 220
+	size = 192

+	// Field (10) 'BlobKzgCommitments'
+	size += len(e.BlobKzgCommitments) * 48
+
	return
}
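
Worked size arithmetic implied by the constants above (illustrative): the bid's fixed part is now 192 bytes (the former 32-byte commitments root replaced by a 4-byte offset), each commitment adds 48 bytes, and the signed wrapper below adds a 4-byte offset plus a 96-byte signature.

	n := 3                 // hypothetical commitment count
	bid := 192 + n*48      // 336 bytes
	signed := 4 + 96 + bid // 436 bytes for the signed bid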

@@ -203,12 +239,24 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	// Field (9) 'ExecutionPayment'
	hh.PutUint64(uint64(e.ExecutionPayment))

-	// Field (10) 'BlobKzgCommitmentsRoot'
-	if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
-		err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
-		return
+	// Field (10) 'BlobKzgCommitments'
+	{
+		if size := len(e.BlobKzgCommitments); size > 4096 {
+			err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
+			return
+		}
+		subIndx := hh.Index()
+		for _, i := range e.BlobKzgCommitments {
+			if len(i) != 48 {
+				err = ssz.ErrBytesLength
+				return
+			}
+			hh.PutBytes(i)
+		}
+
+		numItems := uint64(len(e.BlobKzgCommitments))
+		hh.MerkleizeWithMixin(subIndx, numItems, 4096)
	}
-	hh.PutBytes(e.BlobKzgCommitmentsRoot)

	hh.Merkleize(indx)
	return
@@ -222,14 +270,14 @@ func (s *SignedExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the SignedExecutionPayloadBid object to a target array
func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
+	offset := int(100)

-	// Field (0) 'Message'
+	// Offset (0) 'Message'
+	dst = ssz.WriteOffset(dst, offset)
	if s.Message == nil {
		s.Message = new(ExecutionPayloadBid)
	}
-	if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
-		return
-	}
+	offset += s.Message.SizeSSZ()

	// Field (1) 'Signature'
	if size := len(s.Signature); size != 96 {
@@ -238,6 +286,11 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
	}
	dst = append(dst, s.Signature...)

+	// Field (0) 'Message'
+	if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
+		return
+	}
+
	return
}
@@ -245,30 +298,51 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
-	if size != 316 {
+	if size < 100 {
		return ssz.ErrSize
	}

-	// Field (0) 'Message'
-	if s.Message == nil {
-		s.Message = new(ExecutionPayloadBid)
+	tail := buf
+	var o0 uint64
+
+	// Offset (0) 'Message'
+	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
+		return ssz.ErrOffset
	}
-	if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
-		return err
+
+	if o0 != 100 {
+		return ssz.ErrInvalidVariableOffset
	}

	// Field (1) 'Signature'
	if cap(s.Signature) == 0 {
-		s.Signature = make([]byte, 0, len(buf[220:316]))
+		s.Signature = make([]byte, 0, len(buf[4:100]))
	}
-	s.Signature = append(s.Signature, buf[220:316]...)
+	s.Signature = append(s.Signature, buf[4:100]...)

+	// Field (0) 'Message'
+	{
+		buf = tail[o0:]
+		if s.Message == nil {
+			s.Message = new(ExecutionPayloadBid)
+		}
+		if err = s.Message.UnmarshalSSZ(buf); err != nil {
+			return err
+		}
+	}
	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
-	size = 316
+	size = 100

+	// Field (0) 'Message'
+	if s.Message == nil {
+		s.Message = new(ExecutionPayloadBid)
+	}
+	size += s.Message.SizeSSZ()
+
	return
}
@@ -742,7 +816,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
-	offset := int(704)
+	offset := int(392)

	// Field (0) 'RandaoReveal'
	if size := len(b.RandaoReveal); size != 96 {
@@ -804,13 +878,12 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
	dst = ssz.WriteOffset(dst, offset)
	offset += len(b.BlsToExecutionChanges) * 172

-	// Field (10) 'SignedExecutionPayloadBid'
+	// Offset (10) 'SignedExecutionPayloadBid'
	dst = ssz.WriteOffset(dst, offset)
	if b.SignedExecutionPayloadBid == nil {
		b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
	}
-	if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
-		return
-	}
+	offset += b.SignedExecutionPayloadBid.SizeSSZ()

	// Offset (11) 'PayloadAttestations'
	dst = ssz.WriteOffset(dst, offset)
@@ -896,6 +969,11 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
		}
	}

+	// Field (10) 'SignedExecutionPayloadBid'
+	if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
+		return
+	}
+
	// Field (11) 'PayloadAttestations'
	if size := len(b.PayloadAttestations); size > 4 {
		err = ssz.ErrListTooBigFn("--.PayloadAttestations", size, 4)
@@ -914,12 +992,12 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
-	if size < 704 {
+	if size < 392 {
		return ssz.ErrSize
	}

	tail := buf
-	var o3, o4, o5, o6, o7, o9, o11 uint64
+	var o3, o4, o5, o6, o7, o9, o10, o11 uint64

	// Field (0) 'RandaoReveal'
	if cap(b.RandaoReveal) == 0 {
@@ -946,7 +1024,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
		return ssz.ErrOffset
	}

-	if o3 != 704 {
+	if o3 != 392 {
		return ssz.ErrInvalidVariableOffset
	}

@@ -983,16 +1061,13 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
		return ssz.ErrOffset
	}

-	// Field (10) 'SignedExecutionPayloadBid'
-	if b.SignedExecutionPayloadBid == nil {
-		b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
-	}
-	if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
-		return err
+	// Offset (10) 'SignedExecutionPayloadBid'
+	if o10 = ssz.ReadOffset(buf[384:388]); o10 > size || o9 > o10 {
+		return ssz.ErrOffset
	}

	// Offset (11) 'PayloadAttestations'
-	if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
+	if o11 = ssz.ReadOffset(buf[388:392]); o11 > size || o10 > o11 {
		return ssz.ErrOffset
	}

@@ -1096,7 +1171,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {

	// Field (9) 'BlsToExecutionChanges'
	{
-		buf = tail[o9:o11]
+		buf = tail[o9:o10]
		num, err := ssz.DivideInt2(len(buf), 172, 16)
		if err != nil {
			return err

@@ -1112,6 +1187,17 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
		}
	}

+	// Field (10) 'SignedExecutionPayloadBid'
+	{
+		buf = tail[o10:o11]
+		if b.SignedExecutionPayloadBid == nil {
+			b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
+		}
+		if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
+			return err
+		}
+	}
+
	// Field (11) 'PayloadAttestations'
	{
		buf = tail[o11:]

@@ -1134,7 +1220,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
-	size = 704
+	size = 392

	// Field (3) 'ProposerSlashings'
	size += len(b.ProposerSlashings) * 416

@@ -1160,6 +1246,12 @@ func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
	// Field (9) 'BlsToExecutionChanges'
	size += len(b.BlsToExecutionChanges) * 172

+	// Field (10) 'SignedExecutionPayloadBid'
+	if b.SignedExecutionPayloadBid == nil {
+		b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
+	}
+	size += b.SignedExecutionPayloadBid.SizeSSZ()
+
	// Field (11) 'PayloadAttestations'
	size += len(b.PayloadAttestations) * 202
@@ -1437,7 +1529,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
-	offset := int(2741333)
+	offset := int(2741117)

	// Field (0) 'GenesisTime'
	dst = ssz.MarshalUint64(dst, b.GenesisTime)
@@ -1602,13 +1694,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
		return
	}

-	// Field (24) 'LatestExecutionPayloadBid'
+	// Offset (24) 'LatestExecutionPayloadBid'
	dst = ssz.WriteOffset(dst, offset)
	if b.LatestExecutionPayloadBid == nil {
		b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
	}
-	if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
-		return
-	}
+	offset += b.LatestExecutionPayloadBid.SizeSSZ()

	// Field (25) 'NextWithdrawalIndex'
	dst = ssz.MarshalUint64(dst, b.NextWithdrawalIndex)
@@ -1766,6 +1857,11 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
		dst = ssz.MarshalUint64(dst, b.InactivityScores[ii])
	}

+	// Field (24) 'LatestExecutionPayloadBid'
+	if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
+		return
+	}
+
	// Field (27) 'HistoricalSummaries'
	if size := len(b.HistoricalSummaries); size > 16777216 {
		err = ssz.ErrListTooBigFn("--.HistoricalSummaries", size, 16777216)
@@ -1850,12 +1946,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
-	if size < 2741333 {
+	if size < 2741117 {
		return ssz.ErrSize
	}

	tail := buf
-	var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64
+	var o7, o9, o11, o12, o15, o16, o21, o24, o27, o34, o35, o36, o38, o42, o44 uint64

	// Field (0) 'GenesisTime'
	b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])
@@ -1908,7 +2004,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o7 != 2741333 {
|
||||
if o7 != 2741117 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -2014,77 +2110,74 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Field (24) 'LatestExecutionPayloadBid'
|
||||
if b.LatestExecutionPayloadBid == nil {
|
||||
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
|
||||
}
|
||||
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
|
||||
return err
|
||||
// Offset (24) 'LatestExecutionPayloadBid'
|
||||
if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Field (25) 'NextWithdrawalIndex'
|
||||
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])
|
||||
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736633:2736641])
|
||||
|
||||
// Field (26) 'NextWithdrawalValidatorIndex'
|
||||
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))
|
||||
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736641:2736649]))
|
||||
|
||||
// Offset (27) 'HistoricalSummaries'
|
||||
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
|
||||
if o27 = ssz.ReadOffset(buf[2736649:2736653]); o27 > size || o24 > o27 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Field (28) 'DepositRequestsStartIndex'
|
||||
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])
|
||||
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736653:2736661])
|
||||
|
||||
// Field (29) 'DepositBalanceToConsume'
|
||||
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))
|
||||
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736661:2736669]))
|
||||
|
||||
// Field (30) 'ExitBalanceToConsume'
|
||||
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))
|
||||
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736669:2736677]))
|
||||
|
||||
// Field (31) 'EarliestExitEpoch'
|
||||
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))
|
||||
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736677:2736685]))
|
||||
|
||||
// Field (32) 'ConsolidationBalanceToConsume'
|
||||
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))
|
||||
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736685:2736693]))
|
||||
|
||||
// Field (33) 'EarliestConsolidationEpoch'
|
||||
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))
|
||||
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736693:2736701]))
|
||||
|
||||
// Offset (34) 'PendingDeposits'
|
||||
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
|
||||
if o34 = ssz.ReadOffset(buf[2736701:2736705]); o34 > size || o27 > o34 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Offset (35) 'PendingPartialWithdrawals'
|
||||
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
|
||||
if o35 = ssz.ReadOffset(buf[2736705:2736709]); o35 > size || o34 > o35 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Offset (36) 'PendingConsolidations'
|
||||
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
|
||||
if o36 = ssz.ReadOffset(buf[2736709:2736713]); o36 > size || o35 > o36 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Field (37) 'ProposerLookahead'
|
||||
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
|
||||
for ii := 0; ii < 64; ii++ {
|
||||
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
|
||||
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736713:2737225][ii*8 : (ii+1)*8])
|
||||
}
|
||||
|
||||
// Offset (38) 'Builders'
|
||||
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
|
||||
if o38 = ssz.ReadOffset(buf[2737225:2737229]); o38 > size || o36 > o38 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Field (39) 'NextWithdrawalBuilderIndex'
|
||||
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))
|
||||
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737229:2737237]))
|
||||
|
||||
// Field (40) 'ExecutionPayloadAvailability'
|
||||
if cap(b.ExecutionPayloadAvailability) == 0 {
|
||||
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
|
||||
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737237:2738261]))
|
||||
}
|
||||
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)
|
||||
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737237:2738261]...)
|
||||
|
||||
// Field (41) 'BuilderPendingPayments'
|
||||
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)
|
||||
@@ -2092,24 +2185,24 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
if b.BuilderPendingPayments[ii] == nil {
|
||||
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
|
||||
}
|
||||
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
|
||||
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738261:2741077][ii*44 : (ii+1)*44]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Offset (42) 'BuilderPendingWithdrawals'
|
||||
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
|
||||
if o42 = ssz.ReadOffset(buf[2741077:2741081]); o42 > size || o38 > o42 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
// Field (43) 'LatestBlockHash'
|
||||
if cap(b.LatestBlockHash) == 0 {
|
||||
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
|
||||
b.LatestBlockHash = make([]byte, 0, len(buf[2741081:2741113]))
|
||||
}
|
||||
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)
|
||||
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741081:2741113]...)
|
||||
|
||||
// Offset (44) 'PayloadExpectedWithdrawals'
|
||||
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
|
||||
if o44 = ssz.ReadOffset(buf[2741113:2741117]); o44 > size || o42 > o44 {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
@@ -2204,7 +2297,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
|
||||
// Field (21) 'InactivityScores'
|
||||
{
|
||||
buf = tail[o21:o27]
|
||||
buf = tail[o21:o24]
|
||||
num, err := ssz.DivideInt2(len(buf), 8, 1099511627776)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -2215,6 +2308,17 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Field (24) 'LatestExecutionPayloadBid'
|
||||
{
|
||||
buf = tail[o24:o27]
|
||||
if b.LatestExecutionPayloadBid == nil {
|
||||
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
|
||||
}
|
||||
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Field (27) 'HistoricalSummaries'
|
||||
{
|
||||
buf = tail[o27:o34]
|
||||
@@ -2345,7 +2449,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
|
||||
func (b *BeaconStateGloas) SizeSSZ() (size int) {
|
||||
size = 2741333
|
||||
size = 2741117
|
||||
|
||||
// Field (7) 'HistoricalRoots'
|
||||
size += len(b.HistoricalRoots) * 32
|
||||
@@ -2368,6 +2472,12 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
|
||||
// Field (21) 'InactivityScores'
|
||||
size += len(b.InactivityScores) * 8
|
||||
|
||||
// Field (24) 'LatestExecutionPayloadBid'
|
||||
if b.LatestExecutionPayloadBid == nil {
|
||||
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
|
||||
}
|
||||
size += b.LatestExecutionPayloadBid.SizeSSZ()
|
||||
|
||||
// Field (27) 'HistoricalSummaries'
|
||||
size += len(b.HistoricalSummaries) * 64
|
||||
|
||||
@@ -2981,7 +3091,7 @@ func (d *DataColumnSidecarGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the DataColumnSidecarGloas object to a target array
func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(60)
offset := int(56)

// Field (0) 'Index'
dst = ssz.MarshalUint64(dst, d.Index)
@@ -2990,18 +3100,14 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Column) * 2048

// Offset (2) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgCommitments) * 48

// Offset (3) 'KzgProofs'
// Offset (2) 'KzgProofs'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgProofs) * 48

// Field (4) 'Slot'
// Field (3) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(d.Slot))

// Field (5) 'BeaconBlockRoot'
// Field (4) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3021,20 +3127,7 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = append(dst, d.Column[ii]...)
}

// Field (2) 'KzgCommitments'
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(d.KzgCommitments); ii++ {
if size := len(d.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, d.KzgCommitments[ii]...)
}

// Field (3) 'KzgProofs'
// Field (2) 'KzgProofs'
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return
@@ -3054,12 +3147,12 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 60 {
if size < 56 {
return ssz.ErrSize
}

tail := buf
var o1, o2, o3 uint64
var o1, o2 uint64

// Field (0) 'Index'
d.Index = ssz.UnmarshallUint64(buf[0:8])
@@ -3069,28 +3162,23 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}

if o1 != 60 {
if o1 != 56 {
return ssz.ErrInvalidVariableOffset
}

// Offset (2) 'KzgCommitments'
// Offset (2) 'KzgProofs'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}

// Offset (3) 'KzgProofs'
if o3 = ssz.ReadOffset(buf[16:20]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
// Field (3) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[16:24]))

// Field (4) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[20:28]))

// Field (5) 'BeaconBlockRoot'
// Field (4) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[28:60]))
d.BeaconBlockRoot = make([]byte, 0, len(buf[24:56]))
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[28:60]...)
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[24:56]...)

// Field (1) 'Column'
{
@@ -3108,25 +3196,9 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
}
}

// Field (2) 'KzgCommitments'
// Field (2) 'KzgProofs'
{
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgCommitments[ii]) == 0 {
d.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgCommitments[ii] = append(d.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}

// Field (3) 'KzgProofs'
{
buf = tail[o3:]
buf = tail[o2:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
@@ -3144,15 +3216,12 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {

// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecarGloas object
func (d *DataColumnSidecarGloas) SizeSSZ() (size int) {
size = 60
size = 56

// Field (1) 'Column'
size += len(d.Column) * 2048

// Field (2) 'KzgCommitments'
size += len(d.KzgCommitments) * 48

// Field (3) 'KzgProofs'
// Field (2) 'KzgProofs'
size += len(d.KzgProofs) * 48

return
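The hunks above all follow the same fastssz offset scheme: fixed-size fields are written in place, while each variable-size field contributes a 4-byte offset to the fixed part and its raw bytes to the variable tail. As a minimal, self-contained illustration of that scheme for a 48-byte commitment list (the helper names are taken from the generated code above; the import path and struct are assumptions for illustration, not Prysm types):

```go
package main

import (
	"fmt"

	ssz "github.com/prysmaticlabs/fastssz" // import path assumed; helpers match those used above
)

// kzgList mirrors the generated pattern for a variable-length list of
// 48-byte KZG commitments: the fixed part holds an 8-byte slot and a
// 4-byte offset, and the variable tail holds the packed commitments.
type kzgList struct {
	Slot        uint64
	Commitments [][]byte // each exactly 48 bytes, at most 4096 entries
}

func (k *kzgList) MarshalSSZ() ([]byte, error) {
	dst := make([]byte, 0, 12+len(k.Commitments)*48)
	dst = ssz.MarshalUint64(dst, k.Slot) // fixed field
	dst = ssz.WriteOffset(dst, 12)       // offset of the only variable field
	for _, c := range k.Commitments {    // variable tail: 48 bytes per element
		if len(c) != 48 {
			return nil, ssz.ErrBytesLength
		}
		dst = append(dst, c...)
	}
	return dst, nil
}

func (k *kzgList) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 12 {
		return ssz.ErrSize
	}
	k.Slot = ssz.UnmarshallUint64(buf[0:8])
	if o := ssz.ReadOffset(buf[8:12]); o != 12 {
		return ssz.ErrInvalidVariableOffset // first offset must equal the fixed size
	}
	tail := buf[12:]
	num, err := ssz.DivideInt2(len(tail), 48, 4096) // element size, list cap
	if err != nil {
		return err
	}
	k.Commitments = make([][]byte, num)
	for ii := 0; ii < num; ii++ {
		k.Commitments[ii] = append([]byte{}, tail[ii*48:(ii+1)*48]...)
	}
	return nil
}

func main() {
	in := &kzgList{Slot: 7, Commitments: [][]byte{make([]byte, 48)}}
	enc, _ := in.MarshalSSZ()
	out := new(kzgList)
	fmt.Println(out.UnmarshalSSZ(enc), out.Slot, len(out.Commitments)) // <nil> 7 1
}
```

This is why moving `BlobKzgCommitments` into the bid changes every hardcoded constant above: the bid becomes variable-size, so fixed sizes shrink (e.g. 2741333 to 2741117) and a new offset variable (`o24`) threads through the state decoder.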
@@ -3189,26 +3258,7 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (2) 'KzgCommitments'
{
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}

numItems := uint64(len(d.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (3) 'KzgProofs'
// Field (2) 'KzgProofs'
{
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
@@ -3227,10 +3277,10 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (4) 'Slot'
// Field (3) 'Slot'
hh.PutUint64(uint64(d.Slot))

// Field (5) 'BeaconBlockRoot'
// Field (4) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3249,7 +3299,7 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadEnvelope object to a target array
func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(92)
offset := int(88)

// Offset (0) 'Payload'
dst = ssz.WriteOffset(dst, offset)
@@ -3278,11 +3328,7 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))

// Offset (5) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48

// Field (6) 'StateRoot'
// Field (5) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
@@ -3299,19 +3345,6 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
return
}

// Field (5) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}

return
}

@@ -3319,19 +3352,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 92 {
if size < 88 {
return ssz.ErrSize
}

tail := buf
var o0, o1, o5 uint64
var o0, o1 uint64

// Offset (0) 'Payload'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}

if o0 != 92 {
if o0 != 88 {
return ssz.ErrInvalidVariableOffset
}

@@ -3352,16 +3385,11 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (4) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[48:56]))

// Offset (5) 'BlobKzgCommitments'
if o5 = ssz.ReadOffset(buf[56:60]); o5 > size || o1 > o5 {
return ssz.ErrOffset
}

// Field (6) 'StateRoot'
// Field (5) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[60:92]))
e.StateRoot = make([]byte, 0, len(buf[56:88]))
}
e.StateRoot = append(e.StateRoot, buf[60:92]...)
e.StateRoot = append(e.StateRoot, buf[56:88]...)

// Field (0) 'Payload'
{
@@ -3376,7 +3404,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {

// Field (1) 'ExecutionRequests'
{
buf = tail[o1:o5]
buf = tail[o1:]
if e.ExecutionRequests == nil {
e.ExecutionRequests = new(v1.ExecutionRequests)
}
@@ -3384,28 +3412,12 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
return err
}
}

// Field (5) 'BlobKzgCommitments'
{
buf = tail[o5:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadEnvelope object
func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 92
size = 88

// Field (0) 'Payload'
if e.Payload == nil {
@@ -3419,9 +3431,6 @@ func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
}
size += e.ExecutionRequests.SizeSSZ()

// Field (5) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48

return
}

@@ -3457,26 +3466,7 @@ func (e *ExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (4) 'Slot'
hh.PutUint64(uint64(e.Slot))

// Field (5) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}

numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}

// Field (6) 'StateRoot'
// Field (5) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return

@@ -23,17 +23,17 @@ func TestExecutionPayloadBid_Copy(t *testing.T) {
{
name: "fully populated bid",
bid: &ExecutionPayloadBid{
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitmentsRoot: []byte("blob_kzg_commitments_32_bytes!!"),
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitments: [][]byte{[]byte("blob_kzg_commitments_48_bytes_longer_than_needed")},
},
},
}

specrefs/README.md (new file, +35 lines)
@@ -0,0 +1,35 @@
# Specification References

This directory contains specification reference tracking files managed by
[ethspecify](https://github.com/jtraglia/ethspecify).

## Installation

Install `ethspecify` with the following command:

```bash
pipx install ethspecify
```

> [!NOTE]
> You can run `ethspecify <cmd>` in the `specrefs` directory or
> `ethspecify <cmd> --path=specrefs` from the project's root directory.

## Maintenance

When adding support for a new specification version, follow these steps (see the command sketch after this list):

0. Change directory into the `specrefs` directory.
1. Update the version in the `.ethspecify.yml` configuration.
2. Run `ethspecify process` to update/populate specrefs.
3. Run `ethspecify check` to check specrefs.
4. If there are errors, use the error message as a guide to fix the issue. If
there are new specrefs with empty sources, implement/locate each item and
update each specref source list. If you choose not to implement an item,
add an exception to the appropriate section of the `.ethspecify.yml`
configuration.
5. Repeat steps 3 and 4 until `ethspecify check` passes.
6. Run `git diff` to view updated specrefs. If an object/function/etc. has
changed, make the necessary updates to the implementation.
7. Lastly, in the project's root directory, run `act -j check-specrefs` to
ensure everything is correct.
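Condensed into commands, the loop looks roughly like this (a sketch of the documented steps above, not additional tooling):

```bash
cd specrefs
# after bumping the version in .ethspecify.yml:
ethspecify process              # update/populate specrefs
ethspecify check                # fix reported issues and re-run until it passes
git diff                        # review changed specrefs against the implementation
cd .. && act -j check-specrefs  # final verification from the project root
```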
@@ -47,7 +47,6 @@ go_library(
"@in_gopkg_yaml_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
],
)

@@ -11,7 +11,6 @@ import (
"strconv"
"strings"
"syscall"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
cmdshared "github.com/OffchainLabs/prysm/v7/cmd"
@@ -36,12 +35,11 @@ var _ e2etypes.BeaconNodeSet = (*BeaconNodeSet)(nil)
// BeaconNodeSet represents set of beacon nodes.
type BeaconNodeSet struct {
e2etypes.ComponentRunner
config *e2etypes.E2EConfig
nodes []e2etypes.ComponentRunner
enr string
ids []string
multiAddrs []string
started chan struct{}
config *e2etypes.E2EConfig
nodes []e2etypes.ComponentRunner
enr string
ids []string
started chan struct{}
}

// SetENR assigns ENR to the set of beacon nodes.
@@ -76,10 +74,8 @@ func (s *BeaconNodeSet) Start(ctx context.Context) error {
if s.config.UseFixedPeerIDs {
for i := range nodes {
s.ids = append(s.ids, nodes[i].(*BeaconNode).peerID)
s.multiAddrs = append(s.multiAddrs, nodes[i].(*BeaconNode).multiAddr)
}
s.config.PeerIDs = s.ids
s.config.PeerMultiAddrs = s.multiAddrs
}
// All nodes started, close channel, so that all services waiting on a set, can proceed.
close(s.started)
@@ -145,14 +141,6 @@ func (s *BeaconNodeSet) StopAtIndex(i int) error {
return s.nodes[i].Stop()
}

// RestartAtIndex restarts the component at the desired index.
func (s *BeaconNodeSet) RestartAtIndex(ctx context.Context, i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].(*BeaconNode).Restart(ctx)
}

// ComponentAtIndex returns the component at the provided index.
func (s *BeaconNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.nodes) {
@@ -164,14 +152,12 @@ func (s *BeaconNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error
// BeaconNode represents beacon node.
type BeaconNode struct {
e2etypes.ComponentRunner
config *e2etypes.E2EConfig
started chan struct{}
index int
enr string
peerID string
multiAddr string
cmd *exec.Cmd
args []string
config *e2etypes.E2EConfig
started chan struct{}
index int
enr string
peerID string
cmd *exec.Cmd
}

// NewBeaconNode creates and returns a beacon node.
@@ -304,7 +290,6 @@ func (node *BeaconNode) Start(ctx context.Context) error {
args = append(args, fmt.Sprintf("--%s=%s:%d", flags.MevRelayEndpoint.Name, "http://127.0.0.1", e2e.TestParams.Ports.Eth1ProxyPort+index))
}
args = append(args, config.BeaconFlags...)
node.args = args

cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
// Write stderr to log files.
@@ -333,18 +318,6 @@ func (node *BeaconNode) Start(ctx context.Context) error {
return fmt.Errorf("could not find peer id: %w", err)
}
node.peerID = peerId

// Extract QUIC multiaddr for Lighthouse to connect to this node.
// Prysm logs: msg="Node started p2p server" multiAddr="/ip4/192.168.0.14/udp/4250/quic-v1/p2p/16Uiu2..."
// We prefer QUIC over TCP as it's more reliable in E2E tests.
multiAddr, err := helpers.FindFollowingTextInFile(stdOutFile, "multiAddr=\"/ip4/192.168.0.14/udp/")
if err != nil {
return fmt.Errorf("could not find QUIC multiaddr: %w", err)
}
// The extracted text will be like: 4250/quic-v1/p2p/16Uiu2..."
// We need to prepend "/ip4/192.168.0.14/udp/" and strip the trailing quote
multiAddr = strings.TrimSuffix(multiAddr, "\"")
node.multiAddr = "/ip4/192.168.0.14/udp/" + multiAddr
}

// Mark node as ready.
@@ -374,96 +347,6 @@ func (node *BeaconNode) Stop() error {
return node.cmd.Process.Kill()
}

// Restart gracefully stops the beacon node and starts a new process.
// This is useful for testing resilience as it allows the P2P layer to reinitialize
// and discover peers again (unlike SIGSTOP/SIGCONT which breaks QUIC connections permanently).
func (node *BeaconNode) Restart(ctx context.Context) error {
binaryPath, found := bazel.FindBinary("cmd/beacon-chain", "beacon-chain")
if !found {
return errors.New("beacon chain binary not found")
}

// First, continue the process if it's stopped (from PauseAtIndex).
// A stopped process (SIGSTOP) cannot receive SIGTERM until continued.
_ = node.cmd.Process.Signal(syscall.SIGCONT)

if err := node.cmd.Process.Signal(syscall.SIGTERM); err != nil {
return fmt.Errorf("failed to send SIGTERM: %w", err)
}

// Wait for process to exit by polling. We can't call cmd.Wait() here because
// the Start() method's goroutine is already waiting on the command, and calling
// Wait() twice on the same process causes "waitid: no child processes" error.
// Instead, poll using Signal(0) which returns an error when the process no longer exists.
processExited := false
for range 100 {
if err := node.cmd.Process.Signal(syscall.Signal(0)); err != nil {
processExited = true
break
}
time.Sleep(100 * time.Millisecond)
}
if !processExited {
log.Warnf("Beacon node %d did not exit within 10 seconds after SIGTERM, proceeding with restart anyway", node.index)
}

restartArgs := make([]string, 0, len(node.args))
for _, arg := range node.args {
if !strings.Contains(arg, cmdshared.ForceClearDB.Name) {
restartArgs = append(restartArgs, arg)
}
}

stdOutFile, err := os.OpenFile(
path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, node.index)),
os.O_APPEND|os.O_WRONLY,
0644,
)
if err != nil {
return fmt.Errorf("failed to open log file: %w", err)
}
defer func() {
if err := stdOutFile.Close(); err != nil {
log.WithError(err).Error("Failed to close stdout file")
}
}()

cmd := exec.CommandContext(ctx, binaryPath, restartArgs...)
stderr, err := os.OpenFile(
path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stderr.log", node.index)),
os.O_APPEND|os.O_WRONLY|os.O_CREATE,
0644,
)
if err != nil {
return fmt.Errorf("failed to open stderr file: %w", err)
}
cmd.Stderr = stderr

log.Infof("Restarting beacon chain %d with flags: %s", node.index, strings.Join(restartArgs, " "))
if err = cmd.Start(); err != nil {
if closeErr := stderr.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close stderr file")
}
return fmt.Errorf("failed to restart beacon node: %w", err)
}
// Close the parent's file handle after Start(). The child process has its own
// copy of the file descriptor via fork/exec, so this won't affect its ability to write.
if err := stderr.Close(); err != nil {
log.WithError(err).Error("Failed to close stderr file")
}

if err = helpers.WaitForTextInFile(stdOutFile, "Beacon chain gRPC server listening"); err != nil {
return fmt.Errorf("beacon node %d failed to restart properly: %w", node.index, err)
}

node.cmd = cmd
go func() {
_ = cmd.Wait()
}()

return nil
}

func (node *BeaconNode) UnderlyingProcess() *os.Process {
return node.cmd.Process
}

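The removed Restart helper relies on a poll-for-exit idiom worth noting: because a goroutine spawned from Start() already calls cmd.Wait(), a second Wait() would fail, so liveness is probed with Signal(0) instead. In isolation the idiom looks like this (a sketch under the same assumption as the deleted code, namely that some other goroutine reaps the process via Wait):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
	"time"
)

// waitForExit polls until the process stops accepting signals.
// Signal(0) delivers nothing but reports whether the process still
// exists; once the reaping goroutine's Wait() returns, Signal fails
// and we know the process is gone. Returns false on timeout.
func waitForExit(p *os.Process, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err := p.Signal(syscall.Signal(0)); err != nil {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	cmd := exec.Command("sleep", "1")
	_ = cmd.Start()
	go func() { _ = cmd.Wait() }() // reaper, mirroring the Start() goroutine in the deleted code
	fmt.Println(waitForExit(cmd.Process, 10*time.Second, 100*time.Millisecond)) // true
}
```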
@@ -108,17 +108,6 @@ func (s *BuilderSet) StopAtIndex(i int) error {
return s.builders[i].Stop()
}

// RestartAtIndex for builders just does pause/resume.
func (s *BuilderSet) RestartAtIndex(_ context.Context, i int) error {
if i >= len(s.builders) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.builders))
}
if err := s.builders[i].Pause(); err != nil {
return err
}
return s.builders[i].Resume()
}

// ComponentAtIndex returns the component at the provided index.
func (s *BuilderSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.builders) {

@@ -111,17 +111,6 @@ func (s *NodeSet) StopAtIndex(i int) error {
return s.nodes[i].Stop()
}

// RestartAtIndex for eth1 nodes just does pause/resume.
func (s *NodeSet) RestartAtIndex(_ context.Context, i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
if err := s.nodes[i].Pause(); err != nil {
return err
}
return s.nodes[i].Resume()
}

// ComponentAtIndex returns the component at the provided index.
func (s *NodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.nodes) {

@@ -108,17 +108,6 @@ func (s *ProxySet) StopAtIndex(i int) error {
return s.proxies[i].Stop()
}

// RestartAtIndex for proxies just does pause/resume.
func (s *ProxySet) RestartAtIndex(_ context.Context, i int) error {
if i >= len(s.proxies) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.proxies))
}
if err := s.proxies[i].Pause(); err != nil {
return err
}
return s.proxies[i].Resume()
}

// ComponentAtIndex returns the component at the provided index.
func (s *ProxySet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.proxies) {

@@ -127,17 +127,6 @@ func (s *LighthouseBeaconNodeSet) StopAtIndex(i int) error {
return s.nodes[i].Stop()
}

// RestartAtIndex for Lighthouse just does pause/resume.
func (s *LighthouseBeaconNodeSet) RestartAtIndex(_ context.Context, i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
if err := s.nodes[i].Pause(); err != nil {
return err
}
return s.nodes[i].Resume()
}

// ComponentAtIndex returns the component at the provided index.
func (s *LighthouseBeaconNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.nodes) {
@@ -205,10 +194,9 @@ func (node *LighthouseBeaconNode) Start(ctx context.Context) error {
"--suggested-fee-recipient=0x878705ba3f8bc32fcf7f4caa1a35e72af65cf766",
}
if node.config.UseFixedPeerIDs {
// Use libp2p-addresses with full multiaddrs instead of trusted-peers with just peer IDs.
// This allows Lighthouse to connect directly to Prysm nodes without relying on DHT discovery.
flagVal := strings.Join(node.config.PeerMultiAddrs, ",")
args = append(args, fmt.Sprintf("--libp2p-addresses=%s", flagVal))
flagVal := strings.Join(node.config.PeerIDs, ",")
args = append(args,
fmt.Sprintf("--trusted-peers=%s", flagVal))
}
if node.config.UseBuilder {
args = append(args, fmt.Sprintf("--builder=%s:%d", "http://127.0.0.1", e2e.TestParams.Ports.Eth1ProxyPort+prysmNodeCount+index))

@@ -5,7 +5,6 @@ import (
"context"
"encoding/base64"
"io"
"net"
"net/http"
"os"
"os/signal"
@@ -16,7 +15,6 @@ import (
e2e "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
"github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)

var _ types.ComponentRunner = &TracingSink{}
@@ -34,7 +32,6 @@ var _ types.ComponentRunner = &TracingSink{}
type TracingSink struct {
cancel context.CancelFunc
started chan struct{}
stopped chan struct{}
endpoint string
server *http.Server
}
@@ -43,7 +40,6 @@ type TracingSink struct {
func NewTracingSink(endpoint string) *TracingSink {
return &TracingSink{
started: make(chan struct{}, 1),
stopped: make(chan struct{}),
endpoint: endpoint,
}
}
@@ -77,99 +73,62 @@ func (ts *TracingSink) Resume() error {

// Stop stops the component and its underlying process.
func (ts *TracingSink) Stop() error {
if ts.cancel != nil {
ts.cancel()
}
// Wait for server to actually shut down before returning
<-ts.stopped
ts.cancel()
return nil
}

// reusePortListener creates a TCP listener with SO_REUSEADDR set, allowing
// the port to be reused immediately after the previous listener closes.
// This is essential for sequential E2E tests that reuse the same port.
func reusePortListener(addr string) (net.Listener, error) {
lc := net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
var setSockOptErr error
err := c.Control(func(fd uintptr) {
setSockOptErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
})
if err != nil {
return err
}
return setSockOptErr
},
}
return lc.Listen(context.Background(), "tcp", addr)
}

// Initialize an http handler that writes all requests to a file.
func (ts *TracingSink) initializeSink(ctx context.Context) {
defer close(ts.stopped)

mux := &http.ServeMux{}
ts.server = &http.Server{
Addr: ts.endpoint,
Handler: mux,
ReadHeaderTimeout: time.Second,
}
// Disable keep-alives to ensure connections close immediately
ts.server.SetKeepAlivesEnabled(false)

// Create listener with SO_REUSEADDR to allow port reuse between tests
listener, err := reusePortListener(ts.endpoint)
if err != nil {
log.WithError(err).Error("Failed to create listener")
return
}

defer func() {
if err := ts.server.Close(); err != nil {
log.WithError(err).Error("Failed to close http server")
return
}
}()
stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, e2e.TracingRequestSinkFileName)
if err != nil {
log.WithError(err).Error("Failed to create stdout file")
if closeErr := listener.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close listener after file creation error")
}
return
}

cleanup := func() {
if err := stdOutFile.Close(); err != nil {
log.WithError(err).Error("Could not close stdout file")
}
// Use Shutdown for graceful shutdown that releases the port
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := ts.server.Shutdown(shutdownCtx); err != nil {
log.WithError(err).Error("Could not gracefully shutdown http server")
// Force close if shutdown times out
if err := ts.server.Close(); err != nil {
log.WithError(err).Error("Could not close http server")
}
if err := ts.server.Close(); err != nil {
log.WithError(err).Error("Could not close http server")
}
}
defer cleanup()

mux.HandleFunc("/", func(_ http.ResponseWriter, r *http.Request) {
if err := captureRequest(stdOutFile, r); err != nil {
log.WithError(err).Error("Failed to capture http request")
return
}
})

sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
select {
case <-ctx.Done():
return
case <-sigs:
return
for {
select {
case <-ctx.Done():
cleanup()
return
case <-sigs:
cleanup()
return
default:
// Sleep for 100ms and do nothing while waiting for
// cancellation.
time.Sleep(100 * time.Millisecond)
}
}
}()

// Use Serve with our custom listener instead of ListenAndServe
if err := ts.server.Serve(listener); err != nil && err != http.ErrServerClosed {
if err := ts.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to serve http")
}
}

@@ -134,17 +134,6 @@ func (s *ValidatorNodeSet) StopAtIndex(i int) error {
return s.nodes[i].Stop()
}

// RestartAtIndex for validators just does pause/resume since they don't have P2P issues.
func (s *ValidatorNodeSet) RestartAtIndex(_ context.Context, i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
if err := s.nodes[i].Pause(); err != nil {
return err
}
return s.nodes[i].Resume()
}

// ComponentAtIndex returns the component at the provided index.
func (s *ValidatorNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
if i >= len(s.nodes) {

@@ -48,6 +48,7 @@ const (
// allNodesStartTimeout defines period after which nodes are considered
// stalled (safety measure for nodes stuck at startup, shouldn't normally happen).
allNodesStartTimeout = 5 * time.Minute

// errGeneralCode is used to represent the string value for all general process errors.
errGeneralCode = "exit status 1"
)
@@ -194,20 +195,12 @@ func (r *testRunner) runEvaluators(ec *e2etypes.EvaluationContext, conns []*grpc
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
for currentEpoch := range ticker.C() {
log.WithField("epoch", currentEpoch).Info("Processing epoch")
if config.EvalInterceptor(ec, currentEpoch, conns) {
log.WithField("epoch", currentEpoch).Info("Interceptor returned true, skipping evaluators")
continue
}
r.executeProvidedEvaluators(ec, currentEpoch, conns, config.Evaluators)

if t.Failed() || currentEpoch >= config.EpochsToRun-1 {
log.WithFields(log.Fields{
"currentEpoch": currentEpoch,
"EpochsToRun": config.EpochsToRun,
"testFailed": t.Failed(),
"epochLimitHit": currentEpoch >= config.EpochsToRun-1,
}).Info("Stopping evaluator loop")
ticker.Done()
if t.Failed() {
return errors.New("test failed")
@@ -232,9 +225,9 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{r.depositor}); err != nil {
return errors.Wrap(err, "testDepositsAndTx unable to run, depositor did not Start")
}
go func() {
if r.config.TestDeposits {
log.Info("Running deposit tests")
go func() {
if r.config.TestDeposits {
log.Info("Running deposit tests")
// The validators with an index < minGenesisActiveCount all have deposits already from the chain start.
// Skip all of those chain start validators by seeking to minGenesisActiveCount in the validator list
// for further deposit testing.
@@ -245,13 +238,12 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
r.t.Error(errors.Wrap(err, "depositor.SendAndMine failed"))
}
}
}
// Only generate background transactions when relevant for the test.
// Checkpoint sync and REST API tests need EL blocks to advance, so include them.
if r.config.TestDeposits || r.config.TestFeature || r.config.UseBuilder || r.config.TestCheckpointSync || r.config.UseBeaconRestApi {
r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
}
}()
}
// Only generate background transactions when relevant for the test.
if r.config.TestDeposits || r.config.TestFeature || r.config.UseBuilder {
r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
}
}()
if r.config.TestDeposits {
return depositCheckValidator.Start(ctx)
}
@@ -630,7 +622,7 @@ func (r *testRunner) scenarioRun() error {
tickingStartTime := helpers.EpochTickerStartTime(genesis)

ec := e2etypes.NewEvaluationContext(r.depositor.History())
log.WithField("EpochsToRun", r.config.EpochsToRun).Info("Starting evaluators")
// Run assigned evaluators.
return r.runEvaluators(ec, conns, tickingStartTime)
}

@@ -676,9 +668,9 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
freezeStartEpoch := lastForkEpoch + 1
freezeEndEpoch := lastForkEpoch + 2
optimisticStartEpoch := lastForkEpoch + 6
optimisticEndEpoch := lastForkEpoch + 8
optimisticEndEpoch := lastForkEpoch + 7
recoveryEpochStart, recoveryEpochEnd := lastForkEpoch+3, lastForkEpoch+4
secondRecoveryEpochStart, secondRecoveryEpochMid, secondRecoveryEpochEnd := lastForkEpoch+9, lastForkEpoch+10, lastForkEpoch+11
secondRecoveryEpochStart, secondRecoveryEpochEnd := lastForkEpoch+8, lastForkEpoch+9

newPayloadMethod := "engine_newPayloadV4"
forkChoiceUpdatedMethod := "engine_forkchoiceUpdatedV3"
@@ -688,18 +680,13 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
forkChoiceUpdatedMethod = "engine_forkchoiceUpdatedV3"
}

// Skip evaluators during optimistic sync window (between start and end, exclusive)
if primitives.Epoch(epoch) > optimisticStartEpoch && primitives.Epoch(epoch) < optimisticEndEpoch {
return true
}

switch primitives.Epoch(epoch) {
case freezeStartEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
return true
case freezeEndEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.RestartAtIndex(r.comHandler.ctx, 0))
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
return true
case optimisticStartEpoch:
@@ -714,19 +701,6 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
}, func() bool {
return true
})
// Also intercept forkchoiceUpdated for prysm beacon node to prevent
// SetOptimisticToValid from clearing the optimistic status.
component.(e2etypes.EngineProxy).AddRequestInterceptor(forkChoiceUpdatedMethod, func() any {
return &ForkchoiceUpdatedResponse{
Status: &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
LatestValidHash: nil,
},
PayloadId: nil,
}
}, func() bool {
return true
})
// Set it for lighthouse beacon node.
component, err = r.comHandler.eth1Proxy.ComponentAtIndex(2)
require.NoError(r.t, err)
@@ -760,7 +734,6 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
engineProxy, ok := component.(e2etypes.EngineProxy)
require.Equal(r.t, true, ok)
engineProxy.RemoveRequestInterceptor(newPayloadMethod)
engineProxy.RemoveRequestInterceptor(forkChoiceUpdatedMethod)
engineProxy.ReleaseBackedUpRequests(newPayloadMethod)

// Remove for lighthouse too
@@ -774,8 +747,8 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep

return true
case recoveryEpochStart, recoveryEpochEnd,
secondRecoveryEpochStart, secondRecoveryEpochMid, secondRecoveryEpochEnd:
// Allow epochs for the network to finalize again after optimistic sync test.
secondRecoveryEpochStart, secondRecoveryEpochEnd:
// Allow 2 epochs for the network to finalize again.
return true
}
return false
@@ -809,39 +782,31 @@ func (r *testRunner) eeOffline(_ *e2etypes.EvaluationContext, epoch uint64, _ []
// as expected.
func (r *testRunner) multiScenario(ec *e2etypes.EvaluationContext, epoch uint64, conns []*grpc.ClientConn) bool {
lastForkEpoch := params.LastForkEpoch()
// Freeze/restart scenario is skipped in minimal test: With only 2 beacon nodes,
// when one node restarts it enters initial sync mode. During initial sync, the
// restarting node doesn't subscribe to gossip topics, leaving the other node with
// 0 gossip peers. This causes a deadlock where the network can't produce blocks
// consistently (no gossip mesh) and the restarting node can't complete initial sync
// (no blocks being produced). This scenario works in multiclient test (4 nodes)
// where 3 healthy nodes maintain the gossip mesh while 1 node syncs.
freezeStartEpoch := lastForkEpoch + 1
freezeEndEpoch := lastForkEpoch + 2
valOfflineStartEpoch := lastForkEpoch + 6
valOfflineEndEpoch := lastForkEpoch + 7
optimisticStartEpoch := lastForkEpoch + 11
optimisticEndEpoch := lastForkEpoch + 13
optimisticEndEpoch := lastForkEpoch + 12

recoveryEpochStart, recoveryEpochEnd := lastForkEpoch+3, lastForkEpoch+4
secondRecoveryEpochStart, secondRecoveryEpochEnd := lastForkEpoch+8, lastForkEpoch+9
thirdRecoveryEpochStart, thirdRecoveryEpochEnd := lastForkEpoch+14, lastForkEpoch+15

type ForkchoiceUpdatedResponse struct {
Status *enginev1.PayloadStatus `json:"payloadStatus"`
PayloadId *enginev1.PayloadIDBytes `json:"payloadId"`
}
thirdRecoveryEpochStart, thirdRecoveryEpochEnd := lastForkEpoch+13, lastForkEpoch+14

newPayloadMethod := "engine_newPayloadV4"
forkChoiceUpdatedMethod := "engine_forkchoiceUpdatedV3"
// Fallback if Electra is not set.
if params.BeaconConfig().ElectraForkEpoch == math.MaxUint64 {
newPayloadMethod = "engine_newPayloadV3"
}

// Skip evaluators during optimistic sync window (between start and end, exclusive)
if primitives.Epoch(epoch) > optimisticStartEpoch && primitives.Epoch(epoch) < optimisticEndEpoch {
return true
}

switch primitives.Epoch(epoch) {
case freezeStartEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
return true
case freezeEndEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
return true
case valOfflineStartEpoch:
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(1))
@@ -861,36 +826,23 @@ func (r *testRunner) multiScenario(ec *e2etypes.EvaluationContext, epoch uint64,
}, func() bool {
return true
})
// Also intercept forkchoiceUpdated to prevent SetOptimisticToValid from
// clearing the optimistic status when the beacon node receives VALID.
component.(e2etypes.EngineProxy).AddRequestInterceptor(forkChoiceUpdatedMethod, func() any {
return &ForkchoiceUpdatedResponse{
Status: &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
LatestValidHash: nil,
},
PayloadId: nil,
}
}, func() bool {
return true
})
return true
case optimisticEndEpoch:
evs := []e2etypes.Evaluator{ev.OptimisticSyncEnabled}
r.executeProvidedEvaluators(ec, epoch, []*grpc.ClientConn{conns[0]}, evs)
// Disable Interceptors
// Disable Interceptor
component, err := r.comHandler.eth1Proxy.ComponentAtIndex(0)
require.NoError(r.t, err)
engineProxy, ok := component.(e2etypes.EngineProxy)
require.Equal(r.t, true, ok)
engineProxy.RemoveRequestInterceptor(newPayloadMethod)
engineProxy.ReleaseBackedUpRequests(newPayloadMethod)
engineProxy.RemoveRequestInterceptor(forkChoiceUpdatedMethod)
engineProxy.ReleaseBackedUpRequests(forkChoiceUpdatedMethod)

return true
case secondRecoveryEpochStart, secondRecoveryEpochEnd,
case recoveryEpochStart, recoveryEpochEnd,
secondRecoveryEpochStart, secondRecoveryEpochEnd,
thirdRecoveryEpochStart, thirdRecoveryEpochEnd:
// Allow 2 epochs for the network to finalize again.
return true
}
return false

@@ -82,7 +82,7 @@ var metricComparisonTests = []comparisonTest{
name: "hot state cache",
topic1: "hot_state_cache_miss",
topic2: "hot_state_cache_hit",
expectedComparison: 0.02,
expectedComparison: 0.01,
},
}

@@ -168,15 +168,20 @@ func metricCheckLessThan(pageContent, topic string, value int) error {

func metricCheckComparison(pageContent, topic1, topic2 string, comparison float64) error {
topic2Value, err := valueOfTopic(pageContent, topic2)
if err != nil || topic2Value == -1 {
// If we can't find the denominator (hits/received total), assume test passes
// If we can't find the first topic (error metrics), then assume the test passes.
if topic2Value != -1 {
return nil
}
if err != nil {
return err
}
topic1Value, err := valueOfTopic(pageContent, topic1)
if err != nil || topic1Value == -1 {
// If we can't find the numerator (misses/failures), assume test passes (no errors)
if topic1Value != -1 {
return nil
}
if err != nil {
return err
}
topicComparison := float64(topic1Value) / float64(topic2Value)
if topicComparison >= comparison {
return fmt.Errorf(

@@ -101,44 +101,16 @@ func peersConnect(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) erro
return nil
}
ctx := context.Background()
expectedPeers := len(conns) - 1 + e2e.TestParams.LighthouseBeaconNodeCount

// Wait up to 60 seconds for all nodes to discover peers.
// Peer discovery via DHT can take time, especially for nodes that start later.
timeout := 60 * time.Second
pollInterval := 1 * time.Second

for _, conn := range conns {
nodeClient := eth.NewNodeClient(conn)
deadline := time.Now().Add(timeout)

var peersResp *eth.Peers
var err error
for time.Now().Before(deadline) {
peersResp, err = nodeClient.ListPeers(ctx, &emptypb.Empty{})
if err != nil {
time.Sleep(pollInterval)
continue
}

if len(peersResp.Peers) >= expectedPeers {
break
}

time.Sleep(pollInterval)
}

peersResp, err := nodeClient.ListPeers(ctx, &emptypb.Empty{})
if err != nil {
return fmt.Errorf("failed to list peers after %v: %w", timeout, err)
return err
}
expectedPeers := len(conns) - 1 + e2e.TestParams.LighthouseBeaconNodeCount
if expectedPeers != len(peersResp.Peers) {
peerIDs := make([]string, 0, len(peersResp.Peers))
for _, p := range peersResp.Peers {
peerIDs = append(peerIDs, p.Address[len(p.Address)-10:])
}
return fmt.Errorf("unexpected amount of peers after %v timeout, expected %d, received %d (connected to: %v)", timeout, expectedPeers, len(peersResp.Peers), peerIDs)
return fmt.Errorf("unexpected amount of peers, expected %d, received %d", expectedPeers, len(peersResp.Peers))
}

time.Sleep(connTimeDelay)
}
return nil

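The removed debugging code above wraps ListPeers in a poll-until-deadline loop: retry at a fixed interval until the peer count reaches the expected value or the timeout expires. A generic, self-contained version of that pattern is sketched below; `pollUntil` is an illustrative helper, not a Prysm function.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check at the given interval until it reports success
// or the timeout elapses, returning the last error seen if any.
func pollUntil(timeout, interval time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	var lastErr error
	for time.Now().Before(deadline) {
		ok, err := check()
		if err == nil && ok {
			return nil
		}
		lastErr = err
		time.Sleep(interval)
	}
	if lastErr != nil {
		return fmt.Errorf("condition not met after %v: %w", timeout, lastErr)
	}
	return errors.New("condition not met before deadline")
}

func main() {
	start := time.Now()
	// Simulated peer discovery: the count reaches the expected value (3)
	// after roughly 30ms, well before the one-second deadline.
	err := pollUntil(time.Second, 10*time.Millisecond, func() (bool, error) {
		peers := int(time.Since(start) / (10 * time.Millisecond))
		return peers >= 3, nil
	})
	fmt.Println(err) // <nil>
}
```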
@@ -38,8 +38,8 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
r := e2eMinimal(t, cfg,
types.WithCheckpointSync(),
types.WithEpochs(10),
types.WithExitEpoch(4), // Minimum due to ShardCommitteePeriod=4
types.WithLargeBlobs(), // Use large blob transactions for BPO testing
types.WithExitEpoch(4), // Minimum due to ShardCommitteePeriod=4
types.WithLargeBlobs(), // Use large blob transactions for BPO testing
)
r.run()
}
}
@@ -117,7 +117,6 @@ type E2EConfig struct {
BeaconFlags []string
ValidatorFlags []string
PeerIDs []string
PeerMultiAddrs []string
ExtraEpochs uint64
}

@@ -223,8 +222,6 @@ type MultipleComponentRunners interface {
ResumeAtIndex(i int) error
// StopAtIndex stops the grouped component element at the desired index.
StopAtIndex(i int) error
// RestartAtIndex restarts the grouped component element at the desired index.
RestartAtIndex(ctx context.Context, i int) error
}

type EngineProxy interface {

@@ -21,10 +21,14 @@ There are tests for mainnet and minimal config, so for each config we will add a

## Running nightly spectests

Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm supports downloading "nightly" spectests from GitHub via a Starlark rule configured through an environment variable.
Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` when running spectests to download the "nightly" spectests.
Note: a GITHUB_TOKEN environment variable must be set. The GitHub token must be a [fine-grained token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token).
Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm supports downloading "nightly" spectests from GitHub via a Starlark rule configured through an environment variable.
Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` or `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>` when running spectests to download the "nightly" spectests.
Note: a GITHUB_TOKEN environment variable must be set. The token does not need to be associated with your main account; one from a "burner account" works. It also does not need to be a fine-grained token; a classic token works.

```
bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
```

```
bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-21422848633
```

@@ -1568,8 +1568,8 @@ func HydrateExecutionPayloadBid(b *ethpb.ExecutionPayloadBid) *ethpb.ExecutionPa
if b.FeeRecipient == nil {
b.FeeRecipient = make([]byte, fieldparams.FeeRecipientLength)
}
if b.BlobKzgCommitmentsRoot == nil {
b.BlobKzgCommitmentsRoot = make([]byte, fieldparams.RootLength)
if b.BlobKzgCommitments == nil {
b.BlobKzgCommitments = make([][]byte, 0)
}
return b
}
@@ -1636,7 +1636,7 @@ func GenerateTestSignedExecutionPayloadBid(slot primitives.Slot) *ethpb.SignedEx
blockHash := bytesutil.PadTo([]byte{0x03}, fieldparams.RootLength)
prevRandao := bytesutil.PadTo([]byte{0x04}, fieldparams.RootLength)
feeRecipient := bytesutil.PadTo([]byte{0x05}, fieldparams.FeeRecipientLength)
blobKzgRoot := bytesutil.PadTo([]byte{0x06}, fieldparams.RootLength)
blobKzgCommitment := bytesutil.PadTo([]byte{0x06}, fieldparams.BLSPubkeyLength)
signature := bytesutil.PadTo([]byte{0x07}, fieldparams.BLSSignatureLength)

return &ethpb.SignedExecutionPayloadBid{
@@ -1650,7 +1650,7 @@ func GenerateTestSignedExecutionPayloadBid(slot primitives.Slot) *ethpb.SignedEx
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
Value: 1000000,
BlobKzgCommitmentsRoot: blobKzgRoot,
BlobKzgCommitments: [][]byte{blobKzgCommitment},
},
Signature: signature,
}

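The test helper now pads the commitment to fieldparams.BLSPubkeyLength rather than RootLength because a KZG commitment, like a BLS public key, is a 48-byte compressed BLS12-381 G1 point. A tiny self-contained sketch of that size equivalence follows; the constants are written out here rather than taken from Prysm's fieldparams package.

```go
package main

import "fmt"

const (
	blsPubkeyLength    = 48 // compressed BLS12-381 G1 point
	kzgCommitmentBytes = 48 // same encoding, hence the shared field param
)

// padTo right-pads b with zeros to length n, mirroring bytesutil.PadTo.
func padTo(b []byte, n int) []byte {
	out := make([]byte, n)
	copy(out, b)
	return out
}

func main() {
	commitment := padTo([]byte{0x06}, kzgCommitmentBytes)
	fmt.Println(len(commitment) == blsPubkeyLength) // true
}
```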
@@ -420,7 +420,8 @@ func TestGenerateTestSignedExecutionPayloadBid(t *testing.T) {
require.NotNil(t, bid.Message.BlockHash)
require.NotNil(t, bid.Message.PrevRandao)
require.NotNil(t, bid.Message.FeeRecipient)
require.NotNil(t, bid.Message.BlobKzgCommitmentsRoot)
require.NotNil(t, bid.Message.BlobKzgCommitments)
require.Equal(t, 1, len(bid.Message.BlobKzgCommitments))

// Verify HashTreeRoot works
_, err := bid.HashTreeRoot()

@@ -564,7 +564,7 @@ func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Builders: make([]*ethpb.Builder, 0),
ExecutionPayloadAvailability: make([]byte, 1024),

@@ -1,5 +1,6 @@
# bazel build @consensus_spec_tests//:test_data
# bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
# bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>

def _get_redirected_url(repository_ctx, url, headers):
if not repository_ctx.which("curl"):
@@ -24,7 +25,7 @@ def _impl(repository_ctx):
version = repository_ctx.getenv("CONSENSUS_SPEC_TESTS_VERSION") or repository_ctx.attr.version
token = repository_ctx.getenv("GITHUB_TOKEN") or ""

if version == "nightly":
if version == "nightly" or version.startswith("nightly-"):
print("Downloading nightly tests")
if not token:
fail("Error GITHUB_TOKEN is not set")
@@ -34,16 +35,22 @@ def _impl(repository_ctx):
"Accept": "application/vnd.github+json",
}

repository_ctx.download(
"https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
% (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
headers = headers,
output = "runs.json"
)
if version.startswith("nightly-"):
run_id = version.split("nightly-", 1)[1]
if not run_id:
fail("Error invalid run id")
else:
repository_ctx.download(
"https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
% (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
headers = headers,
output = "runs.json"
)

run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
repository_ctx.delete("runs.json")
run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
repository_ctx.delete("runs.json")

print("Run id:", run_id)
repository_ctx.download(
"https://api.github.com/repos/%s/actions/runs/%s/artifacts"
% (repository_ctx.attr.repo, run_id),
@@ -108,8 +115,8 @@ consensus_spec_tests = repository_rule(
"version": attr.string(mandatory = True),
"flavors": attr.string_dict(mandatory = True),
"repo": attr.string(default = "ethereum/consensus-specs"),
"workflow": attr.string(default = "generate_vectors.yml"),
"branch": attr.string(default = "dev"),
"workflow": attr.string(default = "nightly-reftests.yml"),
"branch": attr.string(default = "master"),
"release_url_template": attr.string(default = "https://github.com/ethereum/consensus-specs/releases/download/%s"),
},
)