Compare commits

..

2 Commits

Author SHA1 Message Date
james-prysm
d256410582 Merge branch 'develop' into improve-events 2026-02-04 11:49:23 -08:00
james-prysm
ba0b57209b wip 2026-02-04 09:49:41 -06:00
77 changed files with 2240 additions and 1581 deletions

View File

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.2
version: v1.7.0-alpha.1
style: full
specrefs:
@@ -146,6 +146,7 @@ exceptions:
- g1_lincomb#deneb
- hash_to_bls_field#deneb
- is_power_of_two#deneb
- multi_exp#deneb
- reverse_bits#deneb
- validate_kzg_g1#deneb
- verify_blob_kzg_proof#deneb
@@ -385,8 +386,7 @@ exceptions:
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
@@ -396,18 +396,13 @@ exceptions:
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -34,6 +34,17 @@ type Event struct {
Data []byte
}
// Send sends an event to the channel, respecting context cancellation.
// Returns true if the event was sent, false if the context was cancelled.
func Send(ctx context.Context, ch chan<- *Event, e *Event) bool {
select {
case ch <- e:
return true
case <-ctx.Done():
return false
}
}
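Illustrative usage only (not part of this change set): a hypothetical producer that forwards events through Send, so a send blocked on a full or unbuffered channel unblocks as soon as the caller's context is cancelled. The names produce, out, and events are assumptions.
func produce(ctx context.Context, out chan<- *Event, events []*Event) {
	for _, e := range events {
		// Send blocks until the receiver accepts the event or ctx is cancelled;
		// on cancellation it returns false and the producer stops.
		if !Send(ctx, out, e) {
			return
		}
	}
}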
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
@@ -67,19 +78,20 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
eventsChannel <- &Event{
Send(h.ctx, eventsChannel, &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
})
return
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{
Send(h.ctx, eventsChannel, &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
})
return
}
@@ -97,42 +109,31 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
// Iterate over lines of the event stream
for scanner.Scan() {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
close(eventsChannel)
return
default:
line := scanner.Text()
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
line := scanner.Text()
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
if !Send(h.ctx, eventsChannel, &Event{EventType: eventType, Data: []byte(data)}) {
return
}
// Reset eventType and data for the next event
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
// Extract event type from the "event" field
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
// Extract data from the "data" field
data = d
}
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
data = d
}
}
if err := scanner.Err(); err != nil {
eventsChannel <- &Event{
Send(h.ctx, eventsChannel, &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
}
})
}
}
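For reference (not part of the diff), the frames parsed by the loop above follow the Server-Sent Events layout that the tests below also emit: an "event:" line naming the topic, a "data:" line carrying the payload, and a blank line terminating the event. A hypothetical frame:
event: head
data: <json payload for the topic>
(blank line ends the event)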

View File

@@ -1,6 +1,7 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
@@ -94,5 +95,86 @@ func TestEventStreamRequestError(t *testing.T) {
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}
}
func TestEventStream_ContextCancelDuringBlockedSend(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
// Send events continuously until the client disconnects.
for i := 0; ; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
if err != nil {
return
}
flusher.Flush()
time.Sleep(10 * time.Millisecond)
}
})
server := httptest.NewServer(mux)
defer server.Close()
// Use an unbuffered channel so sends will block.
eventsChannel := make(chan *Event)
ctx, cancel := context.WithCancel(t.Context())
stream, err := NewEventStream(ctx, http.DefaultClient, server.URL, []string{"head"})
require.NoError(t, err)
done := make(chan struct{})
go func() {
stream.Subscribe(eventsChannel)
close(done)
}()
// Cancel the context while the goroutine is trying to send on the blocked channel.
cancel()
// The goroutine should exit promptly.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatal("Subscribe goroutine did not exit after context cancel")
}
}
func TestEventStream_DoesNotCloseChannel(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
_, err := fmt.Fprintf(w, "event: head\ndata: data1\n\n")
if err != nil {
return
}
flusher.Flush()
// Close the connection after one event to end the scanner loop.
})
server := httptest.NewServer(mux)
defer server.Close()
eventsChannel := make(chan *Event, 10)
stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, []string{"head"})
require.NoError(t, err)
done := make(chan struct{})
go func() {
stream.Subscribe(eventsChannel)
close(done)
}()
// Wait for Subscribe to finish.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatal("Subscribe goroutine did not exit")
}
// Channel should still be open (not closed). Verify by sending to it.
select {
case eventsChannel <- &Event{EventType: "test"}:
// Successfully sent, channel is open.
default:
t.Fatal("Channel appears to be closed or blocked unexpectedly")
}
}

View File

@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type SignedExecutionPayloadBid struct {

View File

@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
for i := range b.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
}
}
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
for i, commitment := range b.BlobKzgCommitments {
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
}, nil
}

View File

@@ -85,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}

View File

@@ -18,7 +18,7 @@ import (
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// <spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
@@ -37,12 +37,6 @@ import (
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
@@ -115,12 +109,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
}
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
commitmentCount := bid.BlobKzgCommitmentCount()
if commitmentCount > uint64(maxBlobsPerBlock) {
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}

View File

@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
return out
}
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
if count > max {
count = max
}
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
count := max + 1
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
slot := primitives.Slot(9)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "blob KZG commitments over max", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)

View File

@@ -264,24 +264,24 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, attestation: IndexedPayloadAttestation
// state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``attestation`` is non-empty, has sorted indices, and has
// Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = attestation.attesting_indices
// indices = indexed_payload_attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
// signing_root = compute_signing_root(attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
// signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
// </spec>
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices

View File

@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -34,9 +33,6 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
_, blockRoot = bkt.Cursor().Last()
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle it anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.
@@ -55,9 +51,6 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
if err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSlotIndicesBucket)
blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle it anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -812,10 +812,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
stored := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
// If the fee recipient is not found in the standard fee recipient bucket, then
// check the registration bucket. The fee recipient may be there.
// This is to resolve incompatibility until we fully migrate to the registration bucket.
@@ -829,7 +826,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
if err := decode(ctx, enc, reg); err != nil {
return err
}
addr = slices.Clone(reg.FeeRecipient)
addr = reg.FeeRecipient
}
return nil
})

View File

@@ -3,7 +3,6 @@ package kv
import (
"context"
"fmt"
"slices"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/ethereum/go-ethereum/common"
@@ -18,10 +17,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
var addr []byte
if err := s.db.View(func(tx *bolt.Tx) error {
chainInfo := tx.Bucket(chainMetadataBucket)
stored := chainInfo.Get(depositContractAddressKey)
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = chainInfo.Get(depositContractAddressKey)
return nil
}); err != nil { // This view never returns an error, but we'll handle it anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -199,7 +199,7 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
var keys [][]byte
if err := stateBucket.ForEach(func(pubKey, v []byte) error {
keys = append(keys, bytes.Clone(pubKey))
keys = append(keys, pubKey)
return nil
}); err != nil {
return nil, err

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
@@ -188,23 +187,20 @@ func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
return bolt.ErrBucketNotFound
}
buf := append(key, stateSuffix...)
rawStateDiff := bucket.Get(buf)
if len(rawStateDiff) == 0 {
stateDiff = bucket.Get(buf)
if stateDiff == nil {
return errors.New("state diff not found")
}
stateDiff = slices.Clone(rawStateDiff)
buf = append(key, validatorSuffix...)
rawValidatorDiff := bucket.Get(buf)
if len(rawValidatorDiff) == 0 {
validatorDiff = bucket.Get(buf)
if validatorDiff == nil {
return errors.New("validator diff not found")
}
validatorDiff = slices.Clone(rawValidatorDiff)
buf = append(key, balancesSuffix...)
rawBalancesDiff := bucket.Get(buf)
if len(rawBalancesDiff) == 0 {
balancesDiff = bucket.Get(buf)
if balancesDiff == nil {
return errors.New("balances diff not found")
}
balancesDiff = slices.Clone(rawBalancesDiff)
return nil
})
@@ -228,11 +224,10 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
if bucket == nil {
return bolt.ErrBucketNotFound
}
rawEnc := bucket.Get(key)
if rawEnc == nil {
enc = bucket.Get(key)
if enc == nil {
return errors.New("state not found")
}
enc = slices.Clone(rawEnc)
return nil
})

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -48,11 +47,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
}
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
rawEnc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
if len(rawEnc) == 0 {
return nil
}
enc = slices.Clone(rawEnc)
enc = tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return nil
}); err != nil {
return nil, err

View File

@@ -6,6 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
"last_root.go",
"log.go",
"metrics.go",
"node.go",
@@ -50,6 +51,7 @@ go_test(
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"last_root_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",

View File

@@ -32,6 +32,7 @@ func New() *ForkChoice {
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}

View File

@@ -0,0 +1,26 @@
package doublylinkedtree
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LastRoot returns the last canonical block root in the given epoch
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
head := f.store.headNode
headEpoch := slots.ToEpoch(head.slot)
epochEnd, err := slots.EpochEnd(epoch)
if err != nil {
return [32]byte{}
}
if headEpoch <= epoch {
return head.root
}
for head != nil && head.slot > epochEnd {
head = head.parent
}
if head == nil {
return [32]byte{}
}
return head.root
}
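A minimal usage sketch (illustrative, not part of the change; the variables ro and epoch are assumptions, with ro holding the read-only forkchoice wrapper extended later in this diff):
root := ro.LastRoot(epoch)
if root == ([32]byte{}) {
	// Returned when the epoch-end slot cannot be computed or when no canonical
	// ancestor of the current head exists at or before the end of the epoch.
}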

View File

@@ -0,0 +1,38 @@
package doublylinkedtree
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestLastRoot(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
headNode := f.store.nodeByRoot[[32]byte{'6'}]
f.store.headNode = headNode
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
}

View File

@@ -94,5 +94,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}

View File

@@ -113,6 +113,7 @@ func (s *Store) insert(ctx context.Context,
}
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
@@ -121,6 +122,7 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
} else {
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
@@ -189,6 +191,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
node.children = nil
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return nil
}
@@ -270,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return f.store.highestReceivedNode.slot
}
// HighestReceivedBlockDelay returns the number of slots by which the highest
// received block was late when it was received. For example, if a block was 12 slots late,
// this method returns 12.
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
}
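A worked example of the arithmetic, using hypothetical numbers matching TestStore_HighestReceivedBlockDelay further below (SecondsPerSlot assumed to be 12):
// Slot 10 starts at genesis + 10*12s = 120s.
// The block is received at genesis + (10+12)*12s = 264s, so SinceSlotStart = 144s.
// 144s / 12s per slot = 12, i.e. the block was 12 slots late.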
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
count := uint64(0)

View File

@@ -128,9 +128,10 @@ func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
@@ -237,6 +238,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
// This test starts with the following branching diagram
@@ -317,6 +319,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
@@ -335,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64
// Received block last epoch is 1
@@ -347,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64 65
// Received block last epoch is 2
@@ -359,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
// 64 65 66
// Received block last epoch is 3
@@ -710,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
f := ForkChoice{
store: &Store{
genesisTime: time.Unix(0, 0),
highestReceivedNode: &Node{
slot: 10,
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
},
},
}
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}

View File

@@ -36,6 +36,7 @@ type Store struct {
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time

View File

@@ -67,11 +67,13 @@ type FastGetter interface {
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
LastRoot(primitives.Epoch) [32]byte
NodeCount() int
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
ProposerBoost() [fieldparams.RootLength]byte

View File

@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
return ro.getter.HighestReceivedBlockRoot()
}
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockDelay()
}
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.l.RLock()
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
return ro.getter.Slot(root)
}
// LastRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.LastRoot(e)
}
// DependentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()

View File

@@ -30,6 +30,7 @@ const (
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
isOptimisticCalled
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
call: highestReceivedBlockSlotCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
},
{
name: "highestReceivedBlockDelayCalled",
call: highestReceivedBlockDelayCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
},
{
name: "receivedBlocksLastEpochCalled",
call: receivedBlocksLastEpochCalled,
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
call: slotCalled,
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
},
{
name: "lastRootCalled",
call: lastRootCalled,
cb: func(g FastGetter) { g.LastRoot(0) },
},
{
name: "targetRootForEpochCalled",
call: targetRootForEpochCalled,
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0
}
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
return 0, nil
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
return 0, nil
}
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
ro.calls = append(ro.calls, lastRootCalled)
return [32]byte{}
}
// DependentRoot implements FastGetter.
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, dependentRootCalled)

View File

@@ -138,9 +138,6 @@ func connect(a, b host.Host) error {
func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
if err := connect(h, p.BHost); err != nil {
p.t.Fatalf("Failed to connect two peers for RPC: %v", err)
}
@@ -172,9 +169,6 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
ps, err := pubsub.NewFloodSub(context.Background(), h,
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),

View File

@@ -163,11 +163,7 @@ func (s *Server) GetBlockV2(w http.ResponseWriter, r *http.Request) {
if blk.Version() >= version.Bellatrix && blk.IsBlinded() {
blk, err = s.ExecutionReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
if errors.Is(err, blocks.ErrNonCanonicalBlock) {
httputil.HandleError(w, fmt.Sprintf("no canonical block found for block %s: execution payload is unavailable (block may have been orphaned)", blockId), http.StatusNotFound)
} else {
httputil.HandleError(w, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block").Error(), http.StatusInternalServerError)
}
httputil.HandleError(w, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block").Error(), http.StatusBadRequest)
return
}
}

View File

@@ -82,20 +82,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
parentBlockRoot := h.ParentBlockRoot()
blockHash := h.BlockHash()
randao := h.PrevRandao()
blobKzgCommitments := h.BlobKzgCommitments()
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
feeRecipient := h.FeeRecipient()
b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitments: blobKzgCommitments,
FeeRecipient: feeRecipient[:],
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
FeeRecipient: feeRecipient[:],
}
b.markFieldAsDirty(types.LatestExecutionPayloadBid)

View File

@@ -14,17 +14,17 @@ import (
)
type testExecutionPayloadBid struct {
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitments [][]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitmentsRoot [32]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
}
func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
@@ -40,12 +40,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
return uint64(len(t.blobKzgCommitments))
}
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func TestSetExecutionPayloadBid(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -60,7 +57,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
blockHash = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
prevRandao = [32]byte(bytes.Repeat([]byte{0x11}, 32))
blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
blobRoot = [32]byte(bytes.Repeat([]byte{0x22}, 32))
feeRecipient [20]byte
)
copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -69,17 +66,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
dirtyFields: make(map[types.FieldIndex]bool),
}
bid := testExecutionPayloadBid{
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitments: blobCommitments,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitmentsRoot: blobRoot,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
}
require.NoError(t, st.SetExecutionPayloadBid(bid))
@@ -89,7 +86,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.

View File

@@ -1,3 +0,0 @@
### Changed
- Fixed the logging issue described in #16314.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused `HighestBlockDelay` method in forkchoice.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused method in forkchoice.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused map in forkchoice.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed some database slices that were used outside of a read transaction. See [bbolt README](https://github.com/etcd-io/bbolt/blob/7b38172caf8cde993d187be4b8738fbe9266fde8/README.md?plain=1#L852) for more on this caveat.
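A minimal sketch of the caveat (assumed names db, someBucket, and someKey; not taken from the diff): byte slices returned by bbolt's Get are only valid for the life of the transaction, so any value that must outlive db.View has to be copied inside the closure.
var out []byte
_ = db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket(someBucket).Get(someKey)
	if len(v) > 0 {
		out = slices.Clone(v) // copy before the transaction closes; v is invalid afterwards
	}
	return nil
})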

View File

@@ -1,2 +0,0 @@
### Ignored
- Close opened host in test helpers

View File

@@ -1,3 +0,0 @@
### Changed
- Moved blob KZG commitments into `ExecutionPayloadBid` and removed them from `ExecutionPayloadEnvelope` for Gloas.

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package execution
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "cmd/beacon-chain/execution")
var log = logrus.WithField("prefix", "execution")

View File

@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -210,7 +210,6 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
logFileName := ctx.String(cmd.LogFileName.Name)
@@ -188,7 +188,6 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -27,11 +27,6 @@ var (
// ErrNilBeaconBlock is returned when a nil beacon block is received.
ErrNilBeaconBlock = errors.New("beacon block can't be nil")
errNonBlindedSignedBeaconBlock = errors.New("can only build signed beacon block from blinded format")
// ErrNonCanonicalBlock is returned when a reconstructed execution payload does
// not match the expected payload header. This occurs when the execution layer
// returns the canonical block's payload at the same height instead of the
// requested (orphaned/reorged) block's payload.
ErrNonCanonicalBlock = errors.New("no canonical block found for payload header")
)
// NewSignedBeaconBlock creates a signed beacon block from a protobuf signed beacon block.
@@ -313,11 +308,11 @@ func checkPayloadAgainstHeader(wrappedPayload, payloadHeader interfaces.Executio
return errors.Wrap(err, "could not hash tree root payload header")
}
if payloadRoot != payloadHeaderRoot {
return errors.Wrap(ErrNonCanonicalBlock, fmt.Sprintf(
return fmt.Errorf(
"payload %#x and header %#x roots do not match",
payloadRoot,
payloadHeaderRoot,
))
)
}
return nil
}
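One practical consequence of the change above: keeping a sentinel in the error chain lets callers branch with `errors.Is`, while a plain formatted error without `%w` can only be string-matched. A minimal stdlib illustration of the distinction (the caller shown is hypothetical):

package example

import (
	"errors"
	"fmt"
)

var errNonCanonical = errors.New("no canonical block found for payload header")

func compareRoots(payloadRoot, headerRoot [32]byte) error {
	if payloadRoot != headerRoot {
		// %w keeps the sentinel in the chain, so errors.Is still matches.
		return fmt.Errorf("%w: payload %#x and header %#x roots do not match",
			errNonCanonical, payloadRoot, headerRoot)
	}
	return nil
}

func isNonCanonical(err error) bool {
	return errors.Is(err, errNonCanonical) // true only if the sentinel is in the wrap chain
}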

View File

@@ -671,7 +671,7 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
BlockHash: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, fieldparams.BLSPubkeyLength)},
BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},

View File

@@ -5,7 +5,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -44,16 +43,11 @@ func (h executionPayloadBidGloas) IsNil() bool {
len(h.payload.ParentBlockRoot) != 32 ||
len(h.payload.BlockHash) != 32 ||
len(h.payload.PrevRandao) != 32 ||
len(h.payload.BlobKzgCommitmentsRoot) != 32 ||
len(h.payload.FeeRecipient) != 20 {
return true
}
for _, commitment := range h.payload.BlobKzgCommitments {
if len(commitment) != 48 {
return true
}
}
return false
}
@@ -137,14 +131,9 @@ func (h executionPayloadBidGloas) ExecutionPayment() primitives.Gwei {
return primitives.Gwei(h.payload.ExecutionPayment)
}
// BlobKzgCommitments returns the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitments() [][]byte {
return bytesutil.SafeCopy2dBytes(h.payload.BlobKzgCommitments)
}
// BlobKzgCommitmentCount returns the number of blob KZG commitments.
func (h executionPayloadBidGloas) BlobKzgCommitmentCount() uint64 {
return uint64(len(h.payload.BlobKzgCommitments))
// BlobKzgCommitmentsRoot returns the root of the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitmentsRoot() [32]byte {
return [32]byte(h.payload.BlobKzgCommitmentsRoot)
}
// FeeRecipient returns the execution address that will receive the builder payment.

View File

@@ -15,17 +15,17 @@ import (
func validExecutionPayloadBid() *ethpb.ExecutionPayloadBid {
return &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
GasLimit: 123,
BuilderIndex: 5,
Slot: 6,
Value: 7,
ExecutionPayment: 8,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x05}, 48)},
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
GasLimit: 123,
BuilderIndex: 5,
Slot: 6,
Value: 7,
ExecutionPayment: 8,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
}
@@ -52,8 +52,8 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
mutate: func(b *ethpb.ExecutionPayloadBid) { b.PrevRandao = []byte{0x04} },
},
{
name: "blob kzg commitments length",
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitments = [][]byte{[]byte{0x05}} },
name: "blob kzg commitments root",
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitmentsRoot = []byte{0x05} },
},
{
name: "fee recipient",
@@ -85,8 +85,7 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x02}, 32)), wrapped.ParentBlockRoot())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x03}, 32)), wrapped.BlockHash())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x04}, 32)), wrapped.PrevRandao())
assert.DeepEqual(t, [][]byte{bytes.Repeat([]byte{0x05}, 48)}, wrapped.BlobKzgCommitments())
require.Equal(t, uint64(1), wrapped.BlobKzgCommitmentCount())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x05}, 32)), wrapped.BlobKzgCommitmentsRoot())
assert.DeepEqual(t, [20]byte(bytes.Repeat([]byte{0x06}, 20)), wrapped.FeeRecipient())
})
}

View File

@@ -22,8 +22,7 @@ type ROExecutionPayloadBid interface {
Slot() primitives.Slot
Value() primitives.Gwei
ExecutionPayment() primitives.Gwei
BlobKzgCommitments() [][]byte
BlobKzgCommitmentCount() uint64
BlobKzgCommitmentsRoot() [32]byte
FeeRecipient() [20]byte
IsNil() bool
}

View File

@@ -31,11 +31,6 @@ EXCLUDED_PATH_PREFIXES=(
".vscode"
)
# Gitignore overrides: paths that should still be scanned even if ignored by VCS.
GITIGNORE_OVERRIDES=(
"cmd/beacon-chain/execution"
)
# The logrus import path
LOGRUS_IMPORT="github.com/sirupsen/logrus"
# ----------------------------
@@ -75,14 +70,6 @@ rg_args=(
-0 # NUL-delimited output
)
if [[ ${#GITIGNORE_OVERRIDES[@]} -gt 0 ]]; then
# Disable VCS ignores so overrides are honored.
rg_args+=( --no-ignore-vcs )
for ov in "${GITIGNORE_OVERRIDES[@]}"; do
rg_args+=( --glob "$ov/**" )
done
fi
for ex in "${EXCLUDED_PATH_PREFIXES[@]}"; do
rg_args+=( --glob "!$ex/**" )
done

View File

@@ -6,28 +6,20 @@ import (
"github.com/sirupsen/logrus"
)
type HookIdentifier string
type WriterHook struct {
AllowedLevels []logrus.Level
Writer io.Writer
Formatter logrus.Formatter
Identifier HookIdentifier
}
func (hook *WriterHook) Levels() []logrus.Level {
if len(hook.AllowedLevels) == 0 {
if hook.AllowedLevels == nil || len(hook.AllowedLevels) == 0 {
return logrus.AllLevels
}
return hook.AllowedLevels
}
func (hook *WriterHook) Fire(entry *logrus.Entry) error {
val, ok := entry.Data[LogTargetField]
if ok && val != hook.Identifier {
return nil
}
line, err := hook.Formatter.Format(entry)
if err != nil {
return err

View File

@@ -17,43 +17,11 @@ import (
"gopkg.in/natefinch/lumberjack.v2"
)
var (
userVerbosity = logrus.InfoLevel
vmodule = make(map[string]logrus.Level)
)
var ephemeralLogFileVerbosity = logrus.DebugLevel
const (
ephemeralLogFileVerbosity = logrus.DebugLevel
LogTargetField = "log_target"
LogTargetEphemeral HookIdentifier = "ephemeral"
LogTargetUser HookIdentifier = "user"
)
// SetLoggingLevelAndData sets the base logging level for logrus.
func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
userVerbosity = baseVerbosity
vmodule = vmoduleMap
globalLevel := max(baseVerbosity, maxVmoduleLevel)
if !disableEphemeral {
globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
}
logrus.SetLevel(globalLevel)
}
// PackageVerbosity returns the verbosity of a given package.
func PackageVerbosity(packagePath string) logrus.Level {
bestLen := 0
bestLevel := userVerbosity
for k, v := range vmodule {
if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
}
func addLogWriter(w io.Writer) {
@@ -100,7 +68,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
Identifier: LogTargetUser,
})
logrus.Debug("File logging initialized")
@@ -134,7 +101,6 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
Identifier: LogTargetEphemeral,
})
logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")

proto/engine/v1/gloas.pb.go (generated executable file, 288 lines added)
View File

@@ -0,0 +1,288 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/engine/v1/gloas.proto
package enginev1
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_prysm_v6_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ExecutionPayloadEnvelope struct {
state protoimpl.MessageState `protogen:"open.v1"`
Payload *ExecutionPayloadDeneb `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
ExecutionRequests *ExecutionRequests `protobuf:"bytes,2,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
BuilderIndex github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex `protobuf:"varint,3,opt,name=builder_index,json=builderIndex,proto3" json:"builder_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ValidatorIndex"`
BeaconBlockRoot []byte `protobuf:"bytes,4,opt,name=beacon_block_root,json=beaconBlockRoot,proto3" json:"beacon_block_root,omitempty" ssz-size:"32"`
Slot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
BlobKzgCommitments [][]byte `protobuf:"bytes,6,rep,name=blob_kzg_commitments,json=blobKzgCommitments,proto3" json:"blob_kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
StateRoot []byte `protobuf:"bytes,7,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionPayloadEnvelope) Reset() {
*x = ExecutionPayloadEnvelope{}
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionPayloadEnvelope) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionPayloadEnvelope) ProtoMessage() {}
func (x *ExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
func (*ExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{0}
}
func (x *ExecutionPayloadEnvelope) GetPayload() *ExecutionPayloadDeneb {
if x != nil {
return x.Payload
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetExecutionRequests() *ExecutionRequests {
if x != nil {
return x.ExecutionRequests
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetBuilderIndex() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex {
if x != nil {
return x.BuilderIndex
}
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(0)
}
func (x *ExecutionPayloadEnvelope) GetBeaconBlockRoot() []byte {
if x != nil {
return x.BeaconBlockRoot
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
if x != nil {
return x.Slot
}
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
}
func (x *ExecutionPayloadEnvelope) GetBlobKzgCommitments() [][]byte {
if x != nil {
return x.BlobKzgCommitments
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetStateRoot() []byte {
if x != nil {
return x.StateRoot
}
return nil
}
type SignedExecutionPayloadEnvelope struct {
state protoimpl.MessageState `protogen:"open.v1"`
Message *ExecutionPayloadEnvelope `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SignedExecutionPayloadEnvelope) Reset() {
*x = SignedExecutionPayloadEnvelope{}
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SignedExecutionPayloadEnvelope) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SignedExecutionPayloadEnvelope) ProtoMessage() {}
func (x *SignedExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SignedExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
func (*SignedExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{1}
}
func (x *SignedExecutionPayloadEnvelope) GetMessage() *ExecutionPayloadEnvelope {
if x != nil {
return x.Message
}
return nil
}
func (x *SignedExecutionPayloadEnvelope) GetSignature() []byte {
if x != nil {
return x.Signature
}
return nil
}
var File_proto_engine_v1_gloas_proto protoreflect.FileDescriptor
var file_proto_engine_v1_gloas_proto_rawDesc = []byte{
0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x67, 0x6c, 0x6f, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65,
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76,
0x31, 0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f,
0x76, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6c, 0x65, 0x63, 0x74,
0x72, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x04, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f,
0x70, 0x65, 0x12, 0x43, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07,
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63,
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x73, 0x0a,
0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03,
0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x52, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x64,
0x65, 0x78, 0x12, 0x32, 0x0a, 0x11, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f,
0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a,
0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05,
0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74,
0x12, 0x42, 0x0a, 0x14, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d,
0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10,
0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36,
0x52, 0x12, 0x62, 0x6c, 0x6f, 0x62, 0x4b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d,
0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f,
0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32,
0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1e,
0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x46,
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x07, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39,
0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3b, 0x5a, 0x39,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31,
0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_proto_engine_v1_gloas_proto_rawDescOnce sync.Once
file_proto_engine_v1_gloas_proto_rawDescData = file_proto_engine_v1_gloas_proto_rawDesc
)
func file_proto_engine_v1_gloas_proto_rawDescGZIP() []byte {
file_proto_engine_v1_gloas_proto_rawDescOnce.Do(func() {
file_proto_engine_v1_gloas_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_engine_v1_gloas_proto_rawDescData)
})
return file_proto_engine_v1_gloas_proto_rawDescData
}
var file_proto_engine_v1_gloas_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_engine_v1_gloas_proto_goTypes = []any{
(*ExecutionPayloadEnvelope)(nil), // 0: ethereum.engine.v1.ExecutionPayloadEnvelope
(*SignedExecutionPayloadEnvelope)(nil), // 1: ethereum.engine.v1.SignedExecutionPayloadEnvelope
(*ExecutionPayloadDeneb)(nil), // 2: ethereum.engine.v1.ExecutionPayloadDeneb
(*ExecutionRequests)(nil), // 3: ethereum.engine.v1.ExecutionRequests
}
var file_proto_engine_v1_gloas_proto_depIdxs = []int32{
2, // 0: ethereum.engine.v1.ExecutionPayloadEnvelope.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
3, // 1: ethereum.engine.v1.ExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
0, // 2: ethereum.engine.v1.SignedExecutionPayloadEnvelope.message:type_name -> ethereum.engine.v1.ExecutionPayloadEnvelope
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_proto_engine_v1_gloas_proto_init() }
func file_proto_engine_v1_gloas_proto_init() {
if File_proto_engine_v1_gloas_proto != nil {
return
}
file_proto_engine_v1_execution_engine_proto_init()
file_proto_engine_v1_electra_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_engine_v1_gloas_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_engine_v1_gloas_proto_goTypes,
DependencyIndexes: file_proto_engine_v1_gloas_proto_depIdxs,
MessageInfos: file_proto_engine_v1_gloas_proto_msgTypes,
}.Build()
File_proto_engine_v1_gloas_proto = out.File
file_proto_engine_v1_gloas_proto_rawDesc = nil
file_proto_engine_v1_gloas_proto_goTypes = nil
file_proto_engine_v1_gloas_proto_depIdxs = nil
}

View File

@@ -144,17 +144,15 @@ func copySignedExecutionPayloadBid(header *SignedExecutionPayloadBid) *SignedExe
}
if header.Message != nil {
copied.Message = &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.Message.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
ExecutionPayment: header.Message.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.Message.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.Message.BlobKzgCommitmentsRoot),
}
}
return copied

View File

@@ -1215,16 +1215,15 @@ func genSignedExecutionPayloadBidGloas() *v1alpha1.SignedExecutionPayloadBid {
func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
return &v1alpha1.ExecutionPayloadBid{
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
ExecutionPayment: primitives.Gwei(rand.Uint64()),
BlobKzgCommitments: [][]byte{bytes(48)},
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
BlobKzgCommitmentsRoot: bytes(32),
}
}

View File

@@ -10,17 +10,17 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
return nil
}
return &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
}
}

File diff suppressed because it is too large.

View File

@@ -33,7 +33,7 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
// slot: Slot
// value: Gwei
// execution_payment: Gwei
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// blob_kzg_commitments_root: Root
message ExecutionPayloadBid {
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
@@ -56,10 +56,7 @@ message ExecutionPayloadBid {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
repeated bytes blob_kzg_commitments = 11 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// SignedExecutionPayloadBid wraps an execution payload bid with a signature.
@@ -369,6 +366,7 @@ message BuilderPendingWithdrawal {
// class DataColumnSidecar(Container):
// index: ColumnIndex
// column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// slot: Slot
// beacon_block_root: Root
@@ -378,6 +376,10 @@ message DataColumnSidecarGloas {
(ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_commitments = 3 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_proofs = 4 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
@@ -400,6 +402,7 @@ message DataColumnSidecarGloas {
// builder_index: BuilderIndex
// beacon_block_root: Root
// slot: Slot
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// state_root: Root
message ExecutionPayloadEnvelope {
ethereum.engine.v1.ExecutionPayloadDeneb payload = 1;
@@ -412,7 +415,11 @@ message ExecutionPayloadEnvelope {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
bytes state_root = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
repeated bytes blob_kzg_commitments = 6 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes state_root = 7 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a signature.

View File

@@ -15,7 +15,6 @@ func (e *ExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadBid object to a target array
func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(192)
// Field (0) 'ParentBlockHash'
if size := len(e.ParentBlockHash); size != 32 {
@@ -67,22 +66,12 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
// Field (9) 'ExecutionPayment'
dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))
// Offset (10) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48
// Field (10) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}
dst = append(dst, e.BlobKzgCommitmentsRoot...)
return
}
@@ -91,13 +80,10 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 192 {
if size != 220 {
return ssz.ErrSize
}
tail := buf
var o10 uint64
// Field (0) 'ParentBlockHash'
if cap(e.ParentBlockHash) == 0 {
e.ParentBlockHash = make([]byte, 0, len(buf[0:32]))
@@ -143,40 +129,18 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
// Field (9) 'ExecutionPayment'
e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))
// Offset (10) 'BlobKzgCommitments'
if o10 = ssz.ReadOffset(buf[188:192]); o10 > size {
return ssz.ErrOffset
// Field (10) 'BlobKzgCommitmentsRoot'
if cap(e.BlobKzgCommitmentsRoot) == 0 {
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
}
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)
if o10 != 192 {
return ssz.ErrInvalidVariableOffset
}
// Field (10) 'BlobKzgCommitments'
{
buf = tail[o10:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
size = 192
// Field (10) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48
size = 220
return
}
@@ -239,24 +203,12 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (9) 'ExecutionPayment'
hh.PutUint64(uint64(e.ExecutionPayment))
// Field (10) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
hh.PutBytes(e.BlobKzgCommitmentsRoot)
hh.Merkleize(indx)
return
@@ -270,14 +222,14 @@ func (s *SignedExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the SignedExecutionPayloadBid object to a target array
func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(100)
// Offset (0) 'Message'
dst = ssz.WriteOffset(dst, offset)
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
offset += s.Message.SizeSSZ()
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
@@ -286,11 +238,6 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
}
dst = append(dst, s.Signature...)
// Field (0) 'Message'
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
return
}
@@ -298,51 +245,30 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 100 {
if size != 316 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Message'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[4:100]))
}
s.Signature = append(s.Signature, buf[4:100]...)
// Field (0) 'Message'
{
buf = tail[o0:]
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
if err = s.Message.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 100
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
size += s.Message.SizeSSZ()
if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
return err
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[220:316]))
}
s.Signature = append(s.Signature, buf[220:316]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 316
return
}
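A quick sanity check of the new fixed sizes: with the commitment list replaced by a 32-byte root, ExecutionPayloadBid contains five 32-byte fields (parent_block_hash, parent_block_root, block_hash, prev_randao, blob_kzg_commitments_root), the 20-byte fee_recipient, and five uint64 fields, so 5*32 + 20 + 5*8 = 220 bytes; SignedExecutionPayloadBid adds the 96-byte signature for 220 + 96 = 316. With no variable-length field left in either container, the generated code above drops the offset bookkeeping entirely.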
@@ -816,7 +742,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(392)
offset := int(704)
// Field (0) 'RandaoReveal'
if size := len(b.RandaoReveal); size != 96 {
@@ -878,12 +804,13 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
dst = ssz.WriteOffset(dst, offset)
offset += len(b.BlsToExecutionChanges) * 172
// Offset (10) 'SignedExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
offset += b.SignedExecutionPayloadBid.SizeSSZ()
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Offset (11) 'PayloadAttestations'
dst = ssz.WriteOffset(dst, offset)
@@ -969,11 +896,6 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
}
}
// Field (10) 'SignedExecutionPayloadBid'
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (11) 'PayloadAttestations'
if size := len(b.PayloadAttestations); size > 4 {
err = ssz.ErrListTooBigFn("--.PayloadAttestations", size, 4)
@@ -992,12 +914,12 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 392 {
if size < 704 {
return ssz.ErrSize
}
tail := buf
var o3, o4, o5, o6, o7, o9, o10, o11 uint64
var o3, o4, o5, o6, o7, o9, o11 uint64
// Field (0) 'RandaoReveal'
if cap(b.RandaoReveal) == 0 {
@@ -1024,7 +946,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o3 != 392 {
if o3 != 704 {
return ssz.ErrInvalidVariableOffset
}
@@ -1061,13 +983,16 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
// Offset (10) 'SignedExecutionPayloadBid'
if o10 = ssz.ReadOffset(buf[384:388]); o10 > size || o9 > o10 {
return ssz.ErrOffset
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
return err
}
// Offset (11) 'PayloadAttestations'
if o11 = ssz.ReadOffset(buf[388:392]); o11 > size || o10 > o11 {
if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
return ssz.ErrOffset
}
@@ -1171,7 +1096,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
// Field (9) 'BlsToExecutionChanges'
{
buf = tail[o9:o10]
buf = tail[o9:o11]
num, err := ssz.DivideInt2(len(buf), 172, 16)
if err != nil {
return err
@@ -1187,17 +1112,6 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (10) 'SignedExecutionPayloadBid'
{
buf = tail[o10:o11]
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}
// Field (11) 'PayloadAttestations'
{
buf = tail[o11:]
@@ -1220,7 +1134,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
size = 392
size = 704
// Field (3) 'ProposerSlashings'
size += len(b.ProposerSlashings) * 416
@@ -1246,12 +1160,6 @@ func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
// Field (9) 'BlsToExecutionChanges'
size += len(b.BlsToExecutionChanges) * 172
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
size += b.SignedExecutionPayloadBid.SizeSSZ()
// Field (11) 'PayloadAttestations'
size += len(b.PayloadAttestations) * 202
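The new BeaconBlockBodyGloas size follows from the same change: the signed bid previously contributed only a 4-byte offset to the fixed region and is now embedded in full, hence 392 - 4 + 316 = 704.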
@@ -1529,7 +1437,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(2741117)
offset := int(2741333)
// Field (0) 'GenesisTime'
dst = ssz.MarshalUint64(dst, b.GenesisTime)
@@ -1694,12 +1602,13 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
return
}
// Offset (24) 'LatestExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
offset += b.LatestExecutionPayloadBid.SizeSSZ()
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (25) 'NextWithdrawalIndex'
dst = ssz.MarshalUint64(dst, b.NextWithdrawalIndex)
@@ -1857,11 +1766,6 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = ssz.MarshalUint64(dst, b.InactivityScores[ii])
}
// Field (24) 'LatestExecutionPayloadBid'
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (27) 'HistoricalSummaries'
if size := len(b.HistoricalSummaries); size > 16777216 {
err = ssz.ErrListTooBigFn("--.HistoricalSummaries", size, 16777216)
@@ -1946,12 +1850,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 2741117 {
if size < 2741333 {
return ssz.ErrSize
}
tail := buf
var o7, o9, o11, o12, o15, o16, o21, o24, o27, o34, o35, o36, o38, o42, o44 uint64
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64
// Field (0) 'GenesisTime'
b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])
@@ -2004,7 +1908,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o7 != 2741117 {
if o7 != 2741333 {
return ssz.ErrInvalidVariableOffset
}
@@ -2110,74 +2014,77 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return err
}
// Offset (24) 'LatestExecutionPayloadBid'
if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 {
return ssz.ErrOffset
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
return err
}
// Field (25) 'NextWithdrawalIndex'
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736633:2736641])
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])
// Field (26) 'NextWithdrawalValidatorIndex'
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736641:2736649]))
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))
// Offset (27) 'HistoricalSummaries'
if o27 = ssz.ReadOffset(buf[2736649:2736653]); o27 > size || o24 > o27 {
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
return ssz.ErrOffset
}
// Field (28) 'DepositRequestsStartIndex'
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736653:2736661])
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])
// Field (29) 'DepositBalanceToConsume'
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736661:2736669]))
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))
// Field (30) 'ExitBalanceToConsume'
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736669:2736677]))
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))
// Field (31) 'EarliestExitEpoch'
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736677:2736685]))
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))
// Field (32) 'ConsolidationBalanceToConsume'
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736685:2736693]))
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))
// Field (33) 'EarliestConsolidationEpoch'
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736693:2736701]))
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))
// Offset (34) 'PendingDeposits'
if o34 = ssz.ReadOffset(buf[2736701:2736705]); o34 > size || o27 > o34 {
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
return ssz.ErrOffset
}
// Offset (35) 'PendingPartialWithdrawals'
if o35 = ssz.ReadOffset(buf[2736705:2736709]); o35 > size || o34 > o35 {
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
return ssz.ErrOffset
}
// Offset (36) 'PendingConsolidations'
if o36 = ssz.ReadOffset(buf[2736709:2736713]); o36 > size || o35 > o36 {
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
return ssz.ErrOffset
}
// Field (37) 'ProposerLookahead'
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
for ii := 0; ii < 64; ii++ {
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736713:2737225][ii*8 : (ii+1)*8])
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
}
// Offset (38) 'Builders'
if o38 = ssz.ReadOffset(buf[2737225:2737229]); o38 > size || o36 > o38 {
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
return ssz.ErrOffset
}
// Field (39) 'NextWithdrawalBuilderIndex'
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737229:2737237]))
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))
// Field (40) 'ExecutionPayloadAvailability'
if cap(b.ExecutionPayloadAvailability) == 0 {
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737237:2738261]))
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
}
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737237:2738261]...)
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)
// Field (41) 'BuilderPendingPayments'
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)
@@ -2185,24 +2092,24 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.BuilderPendingPayments[ii] == nil {
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
}
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738261:2741077][ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
return err
}
}
// Offset (42) 'BuilderPendingWithdrawals'
if o42 = ssz.ReadOffset(buf[2741077:2741081]); o42 > size || o38 > o42 {
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
return ssz.ErrOffset
}
// Field (43) 'LatestBlockHash'
if cap(b.LatestBlockHash) == 0 {
b.LatestBlockHash = make([]byte, 0, len(buf[2741081:2741113]))
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
}
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741081:2741113]...)
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)
// Offset (44) 'PayloadExpectedWithdrawals'
if o44 = ssz.ReadOffset(buf[2741113:2741117]); o44 > size || o42 > o44 {
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
return ssz.ErrOffset
}
@@ -2297,7 +2204,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// Field (21) 'InactivityScores'
{
buf = tail[o21:o24]
buf = tail[o21:o27]
num, err := ssz.DivideInt2(len(buf), 8, 1099511627776)
if err != nil {
return err
@@ -2308,17 +2215,6 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (24) 'LatestExecutionPayloadBid'
{
buf = tail[o24:o27]
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}
// Field (27) 'HistoricalSummaries'
{
buf = tail[o27:o34]
@@ -2449,7 +2345,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
func (b *BeaconStateGloas) SizeSSZ() (size int) {
size = 2741117
size = 2741333
// Field (7) 'HistoricalRoots'
size += len(b.HistoricalRoots) * 32
@@ -2472,12 +2368,6 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
// Field (21) 'InactivityScores'
size += len(b.InactivityScores) * 8
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
size += b.LatestExecutionPayloadBid.SizeSSZ()
// Field (27) 'HistoricalSummaries'
size += len(b.HistoricalSummaries) * 64
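Likewise for BeaconStateGloas: embedding the now fixed-size 220-byte ExecutionPayloadBid in place of its former 4-byte offset grows the fixed region by 216 bytes, from 2741117 to 2741333, which is also why every absolute buffer offset after field 24 in the hunks above shifts by exactly 216.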
@@ -3091,7 +2981,7 @@ func (d *DataColumnSidecarGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the DataColumnSidecarGloas object to a target array
func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(56)
offset := int(60)
// Field (0) 'Index'
dst = ssz.MarshalUint64(dst, d.Index)
@@ -3100,14 +2990,18 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Column) * 2048
// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgCommitments) * 48
// Offset (3) 'KzgProofs'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgProofs) * 48
// Field (3) 'Slot'
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(d.Slot))
// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3127,7 +3021,20 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = append(dst, d.Column[ii]...)
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(d.KzgCommitments); ii++ {
if size := len(d.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, d.KzgCommitments[ii]...)
}
// Field (3) 'KzgProofs'
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return
@@ -3147,12 +3054,12 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 56 {
if size < 60 {
return ssz.ErrSize
}
tail := buf
var o1, o2 uint64
var o1, o2, o3 uint64
// Field (0) 'Index'
d.Index = ssz.UnmarshallUint64(buf[0:8])
@@ -3162,23 +3069,28 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o1 != 56 {
if o1 != 60 {
return ssz.ErrInvalidVariableOffset
}
// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}
// Field (3) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[16:24]))
// Field (4) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[24:56]))
// Offset (3) 'KzgProofs'
if o3 = ssz.ReadOffset(buf[16:20]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[24:56]...)
// Field (4) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[20:28]))
// Field (5) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[28:60]))
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[28:60]...)
// Field (1) 'Column'
{
@@ -3196,9 +3108,25 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
buf = tail[o2:]
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgCommitments[ii]) == 0 {
d.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgCommitments[ii] = append(d.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
// Field (3) 'KzgProofs'
{
buf = tail[o3:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
@@ -3216,12 +3144,15 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecarGloas object
func (d *DataColumnSidecarGloas) SizeSSZ() (size int) {
size = 56
size = 60
// Field (1) 'Column'
size += len(d.Column) * 2048
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
size += len(d.KzgCommitments) * 48
// Field (3) 'KzgProofs'
size += len(d.KzgProofs) * 48
return
@@ -3258,7 +3189,26 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(d.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'KzgProofs'
{
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
@@ -3277,10 +3227,10 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'Slot'
// Field (4) 'Slot'
hh.PutUint64(uint64(d.Slot))
// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3299,7 +3249,7 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadEnvelope object to a target array
func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(88)
offset := int(92)
// Offset (0) 'Payload'
dst = ssz.WriteOffset(dst, offset)
@@ -3328,7 +3278,11 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
// Field (5) 'StateRoot'
// Offset (5) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48
// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
@@ -3345,6 +3299,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
return
}
// Field (5) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}
return
}
@@ -3352,19 +3319,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 88 {
if size < 92 {
return ssz.ErrSize
}
tail := buf
var o0, o1 uint64
var o0, o1, o5 uint64
// Offset (0) 'Payload'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 88 {
if o0 != 92 {
return ssz.ErrInvalidVariableOffset
}
@@ -3385,11 +3352,16 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (4) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[48:56]))
// Field (5) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[56:88]))
// Offset (5) 'BlobKzgCommitments'
if o5 = ssz.ReadOffset(buf[56:60]); o5 > size || o1 > o5 {
return ssz.ErrOffset
}
e.StateRoot = append(e.StateRoot, buf[56:88]...)
// Field (6) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[60:92]))
}
e.StateRoot = append(e.StateRoot, buf[60:92]...)
// Field (0) 'Payload'
{
@@ -3404,7 +3376,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (1) 'ExecutionRequests'
{
buf = tail[o1:]
buf = tail[o1:o5]
if e.ExecutionRequests == nil {
e.ExecutionRequests = new(v1.ExecutionRequests)
}
@@ -3412,12 +3384,28 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
return err
}
}
// Field (5) 'BlobKzgCommitments'
{
buf = tail[o5:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadEnvelope object
func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 88
size = 92
// Field (0) 'Payload'
if e.Payload == nil {
@@ -3431,6 +3419,9 @@ func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
}
size += e.ExecutionRequests.SizeSSZ()
// Field (5) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48
return
}
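ExecutionPayloadEnvelope moves in the opposite direction: blob_kzg_commitments is a variable-length list, so it adds only a new 4-byte offset to the fixed region (88 + 4 = 92) plus 48 bytes per commitment in the variable section; the 56 to 60 change in DataColumnSidecarGloas above has the same explanation.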
@@ -3466,7 +3457,26 @@ func (e *ExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (4) 'Slot'
hh.PutUint64(uint64(e.Slot))
// Field (5) 'StateRoot'
// Field (5) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return

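A minimal sketch of the size arithmetic above (illustrative only, not the generated ssz code): the fixed section of ExecutionPayloadEnvelope grows from 88 to 92 bytes because the new BlobKzgCommitments list contributes one 4-byte offset, while each commitment adds 48 bytes to the variable section.

// envelopeSizeSketch mirrors SizeSSZ above: 92 fixed bytes (88 previous
// fixed part + 4 for the new list offset) plus the variable-length parts.
// Illustrative helper only; names are hypothetical.
func envelopeSizeSketch(numCommitments, payloadSize, requestsSize int) int {
	const fixedSection = 92
	return fixedSection + payloadSize + requestsSize + numCommitments*48
}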
View File

@@ -23,17 +23,17 @@ func TestExecutionPayloadBid_Copy(t *testing.T) {
{
name: "fully populated bid",
bid: &ExecutionPayloadBid{
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
Value: 1000000000000000000,
ExecutionPayment: 5645654,
BlobKzgCommitments: [][]byte{[]byte("blob_kzg_commitments_48_bytes_longer_than_needed")},
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitmentsRoot: []byte("blob_kzg_commitments_32_bytes!!"),
},
},
}

View File

@@ -334,7 +334,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys
_, err = fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
}
for _, k := range keys {
if k != "package" && k != "log_target" {
if k != "package" {
v := entry.Data[k]
format := "%+v"

View File

@@ -512,8 +512,8 @@
- name: MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
sources: []
spec: |
<spec config_var="MIN_BUILDER_WITHDRAWABILITY_DELAY" fork="gloas" hash="be7f8473">
MIN_BUILDER_WITHDRAWABILITY_DELAY: uint64 = 64
<spec config_var="MIN_BUILDER_WITHDRAWABILITY_DELAY" fork="gloas" hash="d378428f">
MIN_BUILDER_WITHDRAWABILITY_DELAY: uint64 = 4096
</spec>
- name: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS#deneb

View File

@@ -738,12 +738,11 @@
- name: DataColumnSidecar#gloas
sources: []
spec: |
<spec ssz_object="DataColumnSidecar" fork="gloas" hash="332c7cfc">
<spec ssz_object="DataColumnSidecar" fork="gloas" hash="8028928b">
class DataColumnSidecar(Container):
index: ColumnIndex
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
# [Modified in Gloas:EIP7732]
# Removed `kzg_commitments`
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
# [Modified in Gloas:EIP7732]
# Removed `signed_block_header`
@@ -917,7 +916,7 @@
- name: ExecutionPayloadBid#gloas
sources: []
spec: |
<spec ssz_object="ExecutionPayloadBid" fork="gloas" hash="1a7b9dea">
<spec ssz_object="ExecutionPayloadBid" fork="gloas" hash="aa71ba16">
class ExecutionPayloadBid(Container):
parent_block_hash: Hash32
parent_block_root: Root
@@ -929,19 +928,20 @@
slot: Slot
value: Gwei
execution_payment: Gwei
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
blob_kzg_commitments_root: Root
</spec>
- name: ExecutionPayloadEnvelope#gloas
sources: []
spec: |
<spec ssz_object="ExecutionPayloadEnvelope" fork="gloas" hash="ec5c0233">
<spec ssz_object="ExecutionPayloadEnvelope" fork="gloas" hash="cd522f7f">
class ExecutionPayloadEnvelope(Container):
payload: ExecutionPayload
execution_requests: ExecutionRequests
builder_index: BuilderIndex
beacon_block_root: Root
slot: Slot
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
state_root: Root
</spec>

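With this change the bid carries only blob_kzg_commitments_root while the envelope carries the full list; the two are tied together later in process_execution_payload by recomputing the SSZ root. A minimal Go sketch of that consistency check (uses the standard bytes package), with computeCommitmentsRoot as a hypothetical stand-in for the hash_tree_root step:

// bidMatchesEnvelope is illustrative only: the committed bid stores the SSZ
// root of the commitments, so the envelope's full list must hash back to it.
// computeCommitmentsRoot is a hypothetical stand-in, not a Prysm API.
func bidMatchesEnvelope(bidRoot []byte, envelopeCommitments [][]byte, computeCommitmentsRoot func([][]byte) []byte) bool {
	return bytes.Equal(bidRoot, computeCommitmentsRoot(envelopeCommitments))
}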
View File

@@ -1,26 +1,13 @@
- name: add_builder_to_registry#gloas
sources: []
spec: |
<spec fn="add_builder_to_registry" fork="gloas" hash="cd0414c9">
<spec fn="add_builder_to_registry" fork="gloas" hash="938224ec">
def add_builder_to_registry(
state: BeaconState,
pubkey: BLSPubkey,
withdrawal_credentials: Bytes32,
amount: uint64,
slot: Slot,
state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> None:
set_or_append_list(
state.builders,
get_index_for_new_builder(state),
Builder(
pubkey=pubkey,
version=uint8(withdrawal_credentials[0]),
execution_address=ExecutionAddress(withdrawal_credentials[12:]),
balance=amount,
deposit_epoch=compute_epoch_at_slot(slot),
withdrawable_epoch=FAR_FUTURE_EPOCH,
),
)
index = get_index_for_new_builder(state)
builder = get_builder_from_deposit(state, pubkey, withdrawal_credentials, amount)
set_or_append_list(state.builders, index, builder)
</spec>
- name: add_flag#altair
@@ -158,20 +145,19 @@
- name: apply_deposit_for_builder#gloas
sources: []
spec: |
<spec fn="apply_deposit_for_builder" fork="gloas" hash="e4bc98c7">
<spec fn="apply_deposit_for_builder" fork="gloas" hash="eae84bc2">
def apply_deposit_for_builder(
state: BeaconState,
pubkey: BLSPubkey,
withdrawal_credentials: Bytes32,
amount: uint64,
signature: BLSSignature,
slot: Slot,
) -> None:
builder_pubkeys = [b.pubkey for b in state.builders]
if pubkey not in builder_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
add_builder_to_registry(state, pubkey, withdrawal_credentials, amount)
else:
# Increase balance by deposit amount
builder_index = builder_pubkeys.index(pubkey)
@@ -565,11 +551,9 @@
- file: beacon-chain/core/signing/signing_root.go
search: func ComputeDomain(
spec: |
<spec fn="compute_domain" fork="phase0" hash="a78b32e4">
<spec fn="compute_domain" fork="phase0" hash="948e1334">
def compute_domain(
domain_type: DomainType,
fork_version: Optional[Version] = None,
genesis_validators_root: Optional[Root] = None,
domain_type: DomainType, fork_version: Version = None, genesis_validators_root: Root = None
) -> Domain:
"""
Return the domain for the ``domain_type`` and ``fork_version``.
@@ -2328,6 +2312,23 @@
return bls.Sign(privkey, signing_root)
</spec>
- name: get_builder_from_deposit#gloas
sources: []
spec: |
<spec fn="get_builder_from_deposit" fork="gloas" hash="7f914af6">
def get_builder_from_deposit(
state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> Builder:
return Builder(
pubkey=pubkey,
version=uint8(withdrawal_credentials[0]),
execution_address=ExecutionAddress(withdrawal_credentials[12:]),
balance=amount,
deposit_epoch=get_current_epoch(state),
withdrawable_epoch=FAR_FUTURE_EPOCH,
)
</spec>
- name: get_builder_payment_quorum_threshold#gloas
sources: []
spec: |
@@ -2344,20 +2345,19 @@
- name: get_builder_withdrawals#gloas
sources: []
spec: |
<spec fn="get_builder_withdrawals" fork="gloas" hash="d54dd146">
<spec fn="get_builder_withdrawals" fork="gloas" hash="35cd32cd">
def get_builder_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
prior_withdrawals: Sequence[Withdrawal],
) -> Tuple[Sequence[Withdrawal], WithdrawalIndex, uint64]:
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
assert len(prior_withdrawals) <= withdrawals_limit
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
for withdrawal in state.builder_pending_withdrawals:
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break
@@ -2379,7 +2379,7 @@
- name: get_builders_sweep_withdrawals#gloas
sources: []
spec: |
<spec fn="get_builders_sweep_withdrawals" fork="gloas" hash="04c1cb10">
<spec fn="get_builders_sweep_withdrawals" fork="gloas" hash="028d161d">
def get_builders_sweep_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -2387,15 +2387,14 @@
) -> Tuple[Sequence[Withdrawal], WithdrawalIndex, uint64]:
epoch = get_current_epoch(state)
builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
assert len(prior_withdrawals) <= withdrawals_limit
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
builder_index = state.next_withdrawal_builder_index
for _ in range(builders_limit):
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break
@@ -2675,7 +2674,7 @@
- name: get_data_column_sidecars#gloas
sources: []
spec: |
<spec fn="get_data_column_sidecars" fork="gloas" hash="abaf4385">
<spec fn="get_data_column_sidecars" fork="gloas" hash="c8d64ac9">
def get_data_column_sidecars(
# [Modified in Gloas:EIP7732]
# Removed `signed_block_header`
@@ -2683,8 +2682,7 @@
beacon_block_root: Root,
# [New in Gloas:EIP7732]
slot: Slot,
# [Modified in Gloas:EIP7732]
# Removed `kzg_commitments`
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
# [Modified in Gloas:EIP7732]
# Removed `kzg_commitments_inclusion_proof`
cells_and_kzg_proofs: Sequence[
@@ -2692,10 +2690,11 @@
],
) -> Sequence[DataColumnSidecar]:
"""
Given a beacon block root and the cells/proofs associated with each blob
in the corresponding payload, assemble the sidecars which can be
distributed to peers.
Given a beacon block root and the commitments, cells/proofs associated with
each blob in the block, assemble the sidecars which can be distributed to peers.
"""
assert len(cells_and_kzg_proofs) == len(kzg_commitments)
sidecars = []
for column_index in range(NUMBER_OF_COLUMNS):
column_cells, column_proofs = [], []
@@ -2703,10 +2702,10 @@
column_cells.append(cells[column_index])
column_proofs.append(proofs[column_index])
sidecars.append(
# [Modified in Gloas:EIP7732]
DataColumnSidecar(
index=column_index,
column=column_cells,
kzg_commitments=kzg_commitments,
kzg_proofs=column_proofs,
slot=slot,
beacon_block_root=beacon_block_root,
@@ -2746,9 +2745,11 @@
- name: get_data_column_sidecars_from_block#gloas
sources: []
spec: |
<spec fn="get_data_column_sidecars_from_block" fork="gloas" hash="302616d2">
<spec fn="get_data_column_sidecars_from_block" fork="gloas" hash="8ac19a18">
def get_data_column_sidecars_from_block(
signed_block: SignedBeaconBlock,
# [New in Gloas:EIP7732]
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
cells_and_kzg_proofs: Sequence[
Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]
],
@@ -2761,6 +2762,7 @@
return get_data_column_sidecars(
beacon_block_root,
signed_block.message.slot,
blob_kzg_commitments,
cells_and_kzg_proofs,
)
</spec>
@@ -2768,7 +2770,7 @@
- name: get_data_column_sidecars_from_column_sidecar#fulu
sources: []
spec: |
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="fulu" hash="4877148a">
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="fulu" hash="4304cdec">
def get_data_column_sidecars_from_column_sidecar(
sidecar: DataColumnSidecar,
cells_and_kzg_proofs: Sequence[
@@ -2776,7 +2778,7 @@
],
) -> Sequence[DataColumnSidecar]:
"""
Given a data column sidecar and the cells/proofs associated with each blob corresponding
Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding
to the commitments it contains, assemble all sidecars for distribution to peers.
"""
assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments)
@@ -2792,7 +2794,7 @@
- name: get_data_column_sidecars_from_column_sidecar#gloas
sources: []
spec: |
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="gloas" hash="beb1f94f">
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="gloas" hash="a1052a1c">
def get_data_column_sidecars_from_column_sidecar(
sidecar: DataColumnSidecar,
cells_and_kzg_proofs: Sequence[
@@ -2800,14 +2802,15 @@
],
) -> Sequence[DataColumnSidecar]:
"""
Given a data column sidecar and the cells/proofs associated with each blob
in the corresponding payload, assemble the sidecars which can be
distributed to peers.
Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding
to the commitments it contains, assemble all sidecars for distribution to peers.
"""
# [Modified in Gloas:EIP7732]
assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments)
return get_data_column_sidecars(
sidecar.beacon_block_root,
sidecar.slot,
sidecar.kzg_commitments,
cells_and_kzg_proofs,
)
</spec>
@@ -2817,10 +2820,8 @@
- file: beacon-chain/core/signing/domain.go
search: func Domain(
spec: |
<spec fn="get_domain" fork="phase0" hash="e60c5fbc">
def get_domain(
state: BeaconState, domain_type: DomainType, epoch: Optional[Epoch] = None
) -> Domain:
<spec fn="get_domain" fork="phase0" hash="99ea23f6">
def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch = None) -> Domain:
"""
Return the signature domain (fork version concatenated with domain type) of a message.
"""
@@ -3887,7 +3888,7 @@
- name: get_pending_partial_withdrawals#electra
sources: []
spec: |
<spec fn="get_pending_partial_withdrawals" fork="electra" hash="306047e9">
<spec fn="get_pending_partial_withdrawals" fork="electra" hash="b53b25d7">
def get_pending_partial_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -3898,14 +3899,13 @@
len(prior_withdrawals) + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP,
MAX_WITHDRAWALS_PER_PAYLOAD - 1,
)
assert len(prior_withdrawals) <= withdrawals_limit
processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
for withdrawal in state.pending_partial_withdrawals:
all_withdrawals = prior_withdrawals + withdrawals
is_withdrawable = withdrawal.withdrawable_epoch <= epoch
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if not is_withdrawable or has_reached_limit:
break
@@ -4091,13 +4091,13 @@
- name: get_ptc_assignment#gloas
sources: []
spec: |
<spec fn="get_ptc_assignment" fork="gloas" hash="7fd50097">
<spec fn="get_ptc_assignment" fork="gloas" hash="817acb90">
def get_ptc_assignment(
state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> Optional[Slot]:
"""
Returns the slot during the requested epoch in which the validator with
index ``validator_index`` is a member of the PTC. Returns None if no
index `validator_index` is a member of the PTC. Returns None if no
assignment is found.
"""
next_epoch = Epoch(get_current_epoch(state) + 1)
@@ -4509,7 +4509,7 @@
- name: get_validators_sweep_withdrawals#capella
sources: []
spec: |
<spec fn="get_validators_sweep_withdrawals" fork="capella" hash="59563c2a">
<spec fn="get_validators_sweep_withdrawals" fork="capella" hash="81868c81">
def get_validators_sweep_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -4518,15 +4518,13 @@
epoch = get_current_epoch(state)
validators_limit = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
# There must be at least one space reserved for validator sweep withdrawals
assert len(prior_withdrawals) < withdrawals_limit
processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
validator_index = state.next_withdrawal_validator_index
for _ in range(validators_limit):
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break
@@ -4562,7 +4560,7 @@
- name: get_validators_sweep_withdrawals#electra
sources: []
spec: |
<spec fn="get_validators_sweep_withdrawals" fork="electra" hash="034093ad">
<spec fn="get_validators_sweep_withdrawals" fork="electra" hash="74bbd437">
def get_validators_sweep_withdrawals(
state: BeaconState,
withdrawal_index: WithdrawalIndex,
@@ -4571,15 +4569,13 @@
epoch = get_current_epoch(state)
validators_limit = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD
# There must be at least one space reserved for validator sweep withdrawals
assert len(prior_withdrawals) < withdrawals_limit
processed_count: uint64 = 0
withdrawals: List[Withdrawal] = []
validator_index = state.next_withdrawal_validator_index
for _ in range(validators_limit):
all_withdrawals = prior_withdrawals + withdrawals
has_reached_limit = len(all_withdrawals) >= withdrawals_limit
has_reached_limit = len(all_withdrawals) == withdrawals_limit
if has_reached_limit:
break
@@ -5735,24 +5731,24 @@
- name: is_valid_indexed_payload_attestation#gloas
sources: []
spec: |
<spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
<spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
def is_valid_indexed_payload_attestation(
state: BeaconState, attestation: IndexedPayloadAttestation
state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
) -> bool:
"""
Check if ``attestation`` is non-empty, has sorted indices, and has
Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
a valid aggregate signature.
"""
# Verify indices are non-empty and sorted
indices = attestation.attesting_indices
indices = indexed_payload_attestation.attesting_indices
if len(indices) == 0 or not indices == sorted(indices):
return False
# Verify aggregate signature
pubkeys = [state.validators[i].pubkey for i in indices]
domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
signing_root = compute_signing_root(attestation.data, domain)
return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
</spec>
- name: is_valid_light_client_header#altair
@@ -6578,15 +6574,13 @@
- name: prepare_execution_payload#capella
sources: []
spec: |
<spec fn="prepare_execution_payload" fork="capella" hash="bdb15c3f">
<spec fn="prepare_execution_payload" fork="capella" hash="998e8b92">
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine,
# [Modified in Capella]
# Removed `pow_chain`
) -> Optional[PayloadId]:
# [Modified in Capella]
# Removed `is_merge_transition_complete` check
@@ -7317,7 +7311,7 @@
- name: process_deposit_request#gloas
sources: []
spec: |
<spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
<spec fn="process_deposit_request" fork="gloas" hash="50ffbd27">
def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
# [New in Gloas:EIP7732]
builder_pubkeys = [b.pubkey for b in state.builders]
@@ -7337,7 +7331,6 @@
deposit_request.withdrawal_credentials,
deposit_request.amount,
deposit_request.signature,
state.slot,
)
return
@@ -7848,7 +7841,7 @@
- name: process_execution_payload#gloas
sources: []
spec: |
<spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
<spec fn="process_execution_payload" fork="gloas" hash="98cceb7d">
def process_execution_payload(
state: BeaconState,
# [Modified in Gloas:EIP7732]
@@ -7878,6 +7871,7 @@
# Verify consistency with the committed bid
committed_bid = state.latest_execution_payload_bid
assert envelope.builder_index == committed_bid.builder_index
assert committed_bid.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
assert committed_bid.prev_randao == payload.prev_randao
# Verify consistency with expected withdrawals
@@ -7891,11 +7885,14 @@
assert payload.parent_hash == state.latest_block_hash
# Verify timestamp
assert payload.timestamp == compute_time_at_slot(state, state.slot)
# Verify commitments are under limit
assert (
len(envelope.blob_kzg_commitments)
<= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
)
# Verify the execution payload is valid
versioned_hashes = [
kzg_commitment_to_versioned_hash(commitment)
# [Modified in Gloas:EIP7732]
for commitment in committed_bid.blob_kzg_commitments
kzg_commitment_to_versioned_hash(commitment) for commitment in envelope.blob_kzg_commitments
]
requests = envelope.execution_requests
assert execution_engine.verify_and_notify_new_payload(
@@ -7936,7 +7933,7 @@
- name: process_execution_payload_bid#gloas
sources: []
spec: |
<spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
<spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
signed_bid = block.body.signed_execution_payload_bid
bid = signed_bid.message
@@ -7955,12 +7952,6 @@
# Verify that the bid signature is valid
assert verify_execution_payload_bid_signature(state, signed_bid)
# Verify commitments are under limit
assert (
len(bid.blob_kzg_commitments)
<= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
)
# Verify that the bid is for the current slot
assert bid.slot == block.slot
# Verify that the bid is for the right parent block
@@ -9535,11 +9526,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="phase0" hash="d2b5fafa">
<spec fn="slash_validator" fork="phase0" hash="85d8d7c9">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9571,11 +9560,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="altair" hash="179ea102">
<spec fn="slash_validator" fork="altair" hash="88f6c284">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9607,11 +9594,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="bellatrix" hash="5964268e">
<spec fn="slash_validator" fork="bellatrix" hash="124f6889">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -9643,11 +9628,9 @@
- file: beacon-chain/core/validators/validator.go
search: func SlashValidator(
spec: |
<spec fn="slash_validator" fork="electra" hash="07e584e2">
<spec fn="slash_validator" fork="electra" hash="54b64d21">
def slash_validator(
state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: Optional[ValidatorIndex] = None,
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
@@ -10650,7 +10633,7 @@
- name: upgrade_to_gloas#gloas
sources: []
spec: |
<spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
<spec fn="upgrade_to_gloas" fork="gloas" hash="855ad3f7">
def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
epoch = fulu.get_current_epoch(pre)
@@ -10719,9 +10702,6 @@
payload_expected_withdrawals=[],
)
# [New in Gloas:EIP7732]
onboard_builders_from_pending_deposits(post)
return post
</spec>
@@ -10999,12 +10979,8 @@
- name: verify_data_column_sidecar#gloas
sources: []
spec: |
<spec fn="verify_data_column_sidecar" fork="gloas" hash="71548b68">
def verify_data_column_sidecar(
sidecar: DataColumnSidecar,
# [New in Gloas:EIP7732]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
) -> bool:
<spec fn="verify_data_column_sidecar" fork="gloas" hash="8838c4fd">
def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool:
"""
Verify if the data column sidecar is valid.
"""
@@ -11012,14 +10988,18 @@
if sidecar.index >= NUMBER_OF_COLUMNS:
return False
# [Modified in Gloas:EIP7732]
# A sidecar for zero blobs is invalid
if len(sidecar.column) == 0:
if len(sidecar.kzg_commitments) == 0:
return False
# [Modified in Gloas:EIP7732]
# Check that the sidecar respects the blob limit
epoch = compute_epoch_at_slot(sidecar.slot)
if len(sidecar.kzg_commitments) > get_blob_parameters(epoch).max_blobs_per_block:
return False
# The column length must be equal to the number of commitments/proofs
if len(sidecar.column) != len(kzg_commitments) or len(sidecar.column) != len(
if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len(
sidecar.kzg_proofs
):
return False

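Since the Gloas DataColumnSidecar carries its own kzg_commitments again, the length invariant in verify_data_column_sidecar above no longer needs commitments passed in from the block. A minimal Go sketch of the same invariant (hypothetical signature, illustrative only):

// sidecarLengthsConsistent mirrors the spec check: one cell per commitment
// and per proof. Hypothetical helper, not Prysm code.
func sidecarLengthsConsistent(column, commitments, proofs [][]byte) bool {
	return len(column) == len(commitments) && len(column) == len(proofs)
}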
View File

@@ -8,9 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -19,9 +17,6 @@ import (
func runExecutionPayloadBidTest(t *testing.T, config string, fork string, objName string, block blockWithSSZObject, sszToState SSZToState, operationFn BlockOperation) {
require.NoError(t, utils.SetConfig(t, config))
cfg := params.BeaconConfig()
params.SetGenesisFork(t, cfg, version.Fulu)
testFolders, testsFolderPath := utils.TestFolders(t, config, fork, "operations/"+objName+"/pyspec_tests")
if len(testFolders) == 0 {
t.Fatalf("No test folders found for %s/%s/%s", config, fork, "operations/"+objName+"/pyspec_tests")

View File

@@ -1568,8 +1568,8 @@ func HydrateExecutionPayloadBid(b *ethpb.ExecutionPayloadBid) *ethpb.ExecutionPa
if b.FeeRecipient == nil {
b.FeeRecipient = make([]byte, fieldparams.FeeRecipientLength)
}
if b.BlobKzgCommitments == nil {
b.BlobKzgCommitments = make([][]byte, 0)
if b.BlobKzgCommitmentsRoot == nil {
b.BlobKzgCommitmentsRoot = make([]byte, fieldparams.RootLength)
}
return b
}
@@ -1636,22 +1636,21 @@ func GenerateTestSignedExecutionPayloadBid(slot primitives.Slot) *ethpb.SignedEx
blockHash := bytesutil.PadTo([]byte{0x03}, fieldparams.RootLength)
prevRandao := bytesutil.PadTo([]byte{0x04}, fieldparams.RootLength)
feeRecipient := bytesutil.PadTo([]byte{0x05}, fieldparams.FeeRecipientLength)
blobKzgCommitment := bytesutil.PadTo([]byte{0x06}, fieldparams.BLSPubkeyLength)
blobKzgRoot := bytesutil.PadTo([]byte{0x06}, fieldparams.RootLength)
signature := bytesutil.PadTo([]byte{0x07}, fieldparams.BLSSignatureLength)
return &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
Slot: slot,
BuilderIndex: 1,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
GasLimit: 30000000,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
Value: 1000000,
ExecutionPayment: 2000000,
BlobKzgCommitments: [][]byte{blobKzgCommitment},
Slot: slot,
BuilderIndex: 1,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
GasLimit: 30000000,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
Value: 1000000,
BlobKzgCommitmentsRoot: blobKzgRoot,
},
Signature: signature,
}

View File

@@ -413,7 +413,6 @@ func TestGenerateTestSignedExecutionPayloadBid(t *testing.T) {
require.Equal(t, primitives.BuilderIndex(1), bid.Message.BuilderIndex)
require.Equal(t, uint64(30000000), bid.Message.GasLimit)
require.Equal(t, primitives.Gwei(1000000), bid.Message.Value)
require.Equal(t, primitives.Gwei(2000000), bid.Message.ExecutionPayment)
// Verify fields are populated
require.NotNil(t, bid.Message.ParentBlockHash)
@@ -421,8 +420,7 @@ func TestGenerateTestSignedExecutionPayloadBid(t *testing.T) {
require.NotNil(t, bid.Message.BlockHash)
require.NotNil(t, bid.Message.PrevRandao)
require.NotNil(t, bid.Message.FeeRecipient)
require.NotNil(t, bid.Message.BlobKzgCommitments)
require.Equal(t, 1, len(bid.Message.BlobKzgCommitments))
require.NotNil(t, bid.Message.BlobKzgCommitmentsRoot)
// Verify HashTreeRoot works
_, err := bid.HashTreeRoot()

View File

@@ -559,12 +559,12 @@ func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (
},
ProposerLookahead: make([]uint64, 64),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Builders: make([]*ethpb.Builder, 0),
ExecutionPayloadAvailability: make([]byte, 1024),

View File

@@ -121,6 +121,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client/event:go_default_library",
"//api/grpc:go_default_library",
"//api/server/structs:go_default_library",
"//async/event:go_default_library",

View File

@@ -290,10 +290,10 @@ func (c *beaconApiValidatorClient) StartEventStream(ctx context.Context, topics
client := &http.Client{} // event stream should not be subject to the same settings as other api calls
eventStream, err := event.NewEventStream(ctx, client, c.handler.Host(), topics)
if err != nil {
eventsChannel <- &event.Event{
event.Send(ctx, eventsChannel, &event.Event{
EventType: event.EventError,
Data: []byte(errors.Wrap(err, "failed to start event stream").Error()),
}
})
return
}
c.isEventStreamRunning = true

View File

@@ -294,10 +294,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
ctx, span := trace.StartSpan(ctx, "validator.gRPCClient.StartEventStream")
defer span.End()
if len(topics) == 0 {
eventsChannel <- &eventClient.Event{
eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(errors.New("no topics were added").Error()),
}
})
return
}
// TODO(13563): ONLY WORKS WITH HEAD TOPIC.
@@ -308,10 +308,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
}
}
if !containsHead {
eventsChannel <- &eventClient.Event{
eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, "gRPC only supports the head topic, and head topic was not passed").Error()),
}
})
}
if containsHead && len(topics) > 1 {
log.Warn("gRPC only supports the head topic, other topics will be ignored")
@@ -319,62 +319,44 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
stream, err := c.getClient().StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
if err != nil {
eventsChannel <- &eventClient.Event{
eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
}
})
return
}
c.isEventStreamRunning = true
for {
select {
case <-ctx.Done():
log.Info("Context canceled, stopping event stream")
res, err := stream.Recv()
if err != nil {
c.isEventStreamRunning = false
eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
})
return
}
if res == nil {
continue
}
b, err := json.Marshal(structs.HeadEvent{
Slot: strconv.FormatUint(uint64(res.Slot), 10),
PreviousDutyDependentRoot: hexutil.Encode(res.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(res.CurrentDutyDependentRoot),
})
if err != nil {
eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(errors.Wrap(err, "failed to marshal Head Event").Error()),
})
continue
}
if !eventClient.Send(ctx, eventsChannel, &eventClient.Event{
EventType: eventClient.EventHead,
Data: b,
}) {
c.isEventStreamRunning = false
return
default:
if ctx.Err() != nil {
c.isEventStreamRunning = false
if errors.Is(ctx.Err(), context.Canceled) {
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, ctx.Err().Error()).Error()),
}
return
}
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(ctx.Err().Error()),
}
return
}
res, err := stream.Recv()
if err != nil {
c.isEventStreamRunning = false
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
}
return
}
if res == nil {
continue
}
b, err := json.Marshal(structs.HeadEvent{
Slot: strconv.FormatUint(uint64(res.Slot), 10),
PreviousDutyDependentRoot: hexutil.Encode(res.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(res.CurrentDutyDependentRoot),
})
if err != nil {
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(errors.Wrap(err, "failed to marshal Head Event").Error()),
}
}
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventHead,
Data: b,
}
}
}
}

View File

@@ -86,10 +86,17 @@ func (r *runner) run(ctx context.Context) {
cleanup := v.Done
defer cleanup()
v.SetTicker()
var wgEvents sync.WaitGroup
wgEvents.Go(func() {
r.processEvents(ctx)
})
for {
select {
case <-ctx.Done():
log.Info("Context canceled, stopping validator")
wgEvents.Wait()
//nolint:govet
return // Exit if context is canceled.
case slot := <-v.NextSlot():
@@ -148,14 +155,24 @@ func (r *runner) run(ctx context.Context) {
// performRoles calls span.End()
rolesCtx, _ := context.WithDeadline(ctx, deadline) //nolint:govet
performRoles(rolesCtx, allRoles, v, slot, &wg, span)
case e := <-v.EventsChan():
v.ProcessEvent(ctx, e)
case currentKeys := <-v.AccountsChangedChan(): // should be less of a priority than next slot
case currentKeys := <-v.AccountsChangedChan():
onAccountsChanged(ctx, v, currentKeys)
}
}
}
// processEvents handles events in a dedicated goroutine, decoupled from slot processing.
func (r *runner) processEvents(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e := <-r.validator.EventsChan():
r.validator.ProcessEvent(ctx, e)
}
}
}
func onAccountsChanged(ctx context.Context, v iface.Validator, current [][48]byte) {
ctx, span := prysmTrace.StartSpan(ctx, "validator.accountsChanged")
defer span.End()

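The runner above spawns the event consumer with sync.WaitGroup.Go (available since Go 1.25) and waits for it on shutdown so no events are processed after run returns. A minimal sketch of the equivalent classic pattern, assuming a hypothetical process callback:

// startEventLoop shows the pre-Go-1.25 equivalent of wg.Go(fn):
// register the task, run it in a goroutine, and mark it done on return.
// Illustrative only; names are hypothetical.
func startEventLoop(ctx context.Context, wg *sync.WaitGroup, process func(context.Context)) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		process(ctx)
	}()
}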
View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/OffchainLabs/go-bitfield"
eventClient "github.com/OffchainLabs/prysm/v7/api/client/event"
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/cache/lru"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -561,3 +562,77 @@ func TestRunnerPushesProposerSettings_ValidContext(t *testing.T) {
runTest(t, timedCtx, v)
}
func TestEventProcessingDoesNotBlockSlotProcessing(t *testing.T) {
slotChan := make(chan primitives.Slot, 1)
v := &testutil.FakeValidator{
Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}},
NextSlotRet: slotChan,
RolesAtRet: []iface.ValidatorRole{iface.RoleUnknown},
EventsChannel: make(chan *eventClient.Event, 64),
IsRegularDeadline: true,
}
require.NoError(t, v.SetProposerSettings(t.Context(), &proposer.Settings{}))
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Send an event before starting the runner.
v.EventsChannel <- &eventClient.Event{
EventType: eventClient.EventHead,
Data: []byte(`{"slot":"1","previous_duty_dependent_root":"0x00","current_duty_dependent_root":"0x00"}`),
}
// Start the runner in a goroutine.
done := make(chan struct{})
go func() {
runTest(t, ctx, v)
close(done)
}()
// Give the runner time to start and spawn its event goroutine.
time.Sleep(100 * time.Millisecond)
// Now send a slot - it should be processed without being blocked by event processing.
slotChan <- 1
time.Sleep(200 * time.Millisecond)
cancel()
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatal("Runner did not exit after context cancel")
}
require.Equal(t, true, v.RoleAtCalled, "Expected RolesAt to be called when slot arrives")
}
func TestEventGoroutine_ExitsOnContextCancel(t *testing.T) {
v := &testutil.FakeValidator{
Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}},
NextSlotRet: make(chan primitives.Slot),
EventsChannel: make(chan *eventClient.Event, 1),
}
ctx, cancel := context.WithCancel(t.Context())
done := make(chan struct{})
go func() {
runTest(t, ctx, v)
close(done)
}()
// Give the runner time to start.
time.Sleep(50 * time.Millisecond)
cancel()
select {
case <-done:
// Runner and event goroutine exited cleanly.
case <-time.After(5 * time.Second):
t.Fatal("Runner did not exit within timeout after context cancel")
}
require.Equal(t, true, v.DoneCalled, "Expected Done() to be called")
}

View File

@@ -231,7 +231,7 @@ func (v *ValidatorService) Start() {
distributed: v.distributed,
disableDutiesPolling: v.disableDutiesPolling,
accountsChangedChannel: make(chan [][fieldparams.BLSPubkeyLength]byte, 1),
eventsChannel: make(chan *eventClient.Event, 1),
eventsChannel: make(chan *eventClient.Event, 64),
}
hm := newHealthMonitor(v.ctx, v.cancel, v.maxHealthChecks, v.validator)

View File
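Growing the events buffer from 1 to 64 lets a burst of head events (for example during a reorg) queue up without stalling the producer; ProcessEvent then drains the backlog in one pass and keeps only the latest head. A small self-contained sketch of that absorb-and-drain behavior, using plain ints in place of events:

package main

import "fmt"

func main() {
	events := make(chan int, 64) // deep buffer absorbs the burst
	for slot := 1; slot <= 50; slot++ {
		events <- slot // producer never blocks while the consumer is busy
	}
	latest := 0
	for {
		select {
		case s := <-events:
			latest = s // keep only the most recent head
		default:
			fmt.Println("latest slot after drain:", latest) // prints 50
			return
		}
	}
}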

@@ -60,6 +60,7 @@ type FakeValidator struct {
proposerSettings *proposer.Settings
Balances map[[48]byte]uint64
EventsChannel chan *event.Event
ProcessEventCalled chan *event.Event
ProposerSettingsErr error
Km keymanager.IKeymanager
graffiti string
@@ -320,7 +321,11 @@ func (*FakeValidator) StartEventStream(_ context.Context, _ []string) {
}
func (*FakeValidator) ProcessEvent(_ context.Context, _ *event.Event) {}
func (fv *FakeValidator) ProcessEvent(_ context.Context, e *event.Event) {
if fv.ProcessEventCalled != nil {
fv.ProcessEventCalled <- e
}
}
func (*FakeValidator) EventStreamIsRunning() bool {
return true

View File

@@ -1273,6 +1273,7 @@ func (v *validator) checkDependentRoots(ctx context.Context, head *structs.HeadE
func (v *validator) ProcessEvent(ctx context.Context, event *eventClient.Event) {
if event == nil || event.Data == nil {
log.Warn("Received empty event")
return
}
switch event.EventType {
case eventClient.EventError:
@@ -1280,28 +1281,73 @@ func (v *validator) ProcessEvent(ctx context.Context, event *eventClient.Event)
case eventClient.EventConnectionError:
log.WithError(errors.New(string(event.Data))).Error("Event stream interrupted")
case eventClient.EventHead:
log.Debug("Received head event")
head := &structs.HeadEvent{}
if err := json.Unmarshal(event.Data, head); err != nil {
log.WithError(err).Error("Failed to unmarshal head Event into JSON")
}
uintSlot, err := strconv.ParseUint(head.Slot, 10, 64)
latest := v.drainHeadEvents(ctx, event)
head, slot, err := v.parseHeadEvent(latest)
if err != nil {
log.WithError(err).Error("Failed to parse slot")
log.WithError(err).Error("Failed to parse head event")
return
}
v.setHighestSlot(primitives.Slot(uintSlot))
v.setHighestSlot(slot)
if !v.disableDutiesPolling {
if err := v.checkDependentRoots(ctx, head); err != nil {
log.WithError(err).Error("Failed to check dependent roots")
}
}
default:
// just keep going and log the error
log.WithField("type", event.EventType).WithField("data", string(event.Data)).Warn("Received an unknown event")
}
}
// parseHeadEvent unmarshals a head event and extracts the slot.
func (v *validator) parseHeadEvent(event *eventClient.Event) (*structs.HeadEvent, primitives.Slot, error) {
head := &structs.HeadEvent{}
if err := json.Unmarshal(event.Data, head); err != nil {
return nil, 0, err
}
uintSlot, err := strconv.ParseUint(head.Slot, 10, 64)
if err != nil {
return nil, 0, err
}
return head, primitives.Slot(uintSlot), nil
}
// drainHeadEvents reads any queued events from the channel, processing
// non-head events immediately and returning the latest head event.
// It also calls setHighestSlot for each intermediate head event.
func (v *validator) drainHeadEvents(ctx context.Context, first *eventClient.Event) *eventClient.Event {
latest := first
drained := 0
for {
select {
case next := <-v.eventsChannel:
if next == nil || next.Data == nil {
continue
}
switch next.EventType {
case eventClient.EventHead:
if _, slot, err := v.parseHeadEvent(next); err == nil {
v.setHighestSlot(slot)
}
latest = next
drained++
case eventClient.EventError:
log.Error(string(next.Data))
case eventClient.EventConnectionError:
log.WithError(errors.New(string(next.Data))).Error("Event stream interrupted")
default:
log.WithField("type", next.EventType).WithField("data", string(next.Data)).Warn("Received an unknown event")
}
case <-ctx.Done():
return latest
default:
if drained > 0 {
log.WithField("drained", drained).Info("Drained stale head events during reorg")
}
return latest
}
}
}
func (v *validator) EventStreamIsRunning() bool {
return v.validatorClient.EventStreamIsRunning()
}

View File

@@ -3,11 +3,14 @@ package client
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"math"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"sort"
@@ -16,6 +19,7 @@ import (
"testing"
"time"
eventClient "github.com/OffchainLabs/prysm/v7/api/client/event"
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/async/event"
@@ -3167,3 +3171,241 @@ func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
require.DeepEqual(t, expectedData, results[i])
}
}
func makeHeadEvent(t *testing.T, slot uint64, prevRoot, currRoot string) *eventClient.Event {
t.Helper()
data, err := json.Marshal(structs.HeadEvent{
Slot: fmt.Sprintf("%d", slot),
PreviousDutyDependentRoot: prevRoot,
CurrentDutyDependentRoot: currRoot,
})
require.NoError(t, err)
return &eventClient.Event{
EventType: eventClient.EventHead,
Data: data,
}
}
func TestProcessEvent_NilEvent(t *testing.T) {
hook := logTest.NewGlobal()
v := &validator{
eventsChannel: make(chan *eventClient.Event, 64),
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
// Should not panic on nil event.
v.ProcessEvent(t.Context(), nil)
assert.LogsContain(t, hook, "Received empty event")
// Should not panic on event with nil data.
v.ProcessEvent(t.Context(), &eventClient.Event{EventType: eventClient.EventHead})
assert.LogsContain(t, hook, "Received empty event")
}
func TestProcessEvent_DrainsStaleHeadEvents(t *testing.T) {
zeroRoot := hexutil.Encode(params.BeaconConfig().ZeroHash[:])
eventsChannel := make(chan *eventClient.Event, 64)
v := &validator{
eventsChannel: eventsChannel,
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
// Queue 5 head events in the channel with increasing slots.
for i := 1; i <= 5; i++ {
eventsChannel <- makeHeadEvent(t, uint64(i), zeroRoot, zeroRoot)
}
// Process the first event; drainHeadEvents should consume the rest.
first := makeHeadEvent(t, 0, zeroRoot, zeroRoot)
v.ProcessEvent(t.Context(), first)
// The channel should now be empty.
select {
case e := <-eventsChannel:
t.Fatalf("Expected channel to be drained, but got event: %v", e)
default:
}
// Highest slot should be 5 (the last drained event).
v.highestValidSlotLock.Lock()
got := v.highestValidSlot
v.highestValidSlotLock.Unlock()
require.Equal(t, primitives.Slot(5), got)
}
func TestProcessEvent_MixedEventsDuringDrain(t *testing.T) {
hook := logTest.NewGlobal()
zeroRoot := hexutil.Encode(params.BeaconConfig().ZeroHash[:])
eventsChannel := make(chan *eventClient.Event, 64)
v := &validator{
eventsChannel: eventsChannel,
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
// Queue: head(1), error, head(3)
eventsChannel <- makeHeadEvent(t, 1, zeroRoot, zeroRoot)
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte("test error message"),
}
eventsChannel <- makeHeadEvent(t, 3, zeroRoot, zeroRoot)
first := makeHeadEvent(t, 0, zeroRoot, zeroRoot)
v.ProcessEvent(t.Context(), first)
// Error event should have been logged.
assert.LogsContain(t, hook, "test error message")
// Highest slot should be 3 (latest head event).
v.highestValidSlotLock.Lock()
got := v.highestValidSlot
v.highestValidSlotLock.Unlock()
require.Equal(t, primitives.Slot(3), got)
}
func TestEventFlood_NoDutyDelay(t *testing.T) {
zeroRoot := hexutil.Encode(params.BeaconConfig().ZeroHash[:])
eventsChannel := make(chan *eventClient.Event, 128)
v := &validator{
eventsChannel: eventsChannel,
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
// Flood the channel with 50 head events with increasing slots.
for i := 1; i <= 50; i++ {
eventsChannel <- makeHeadEvent(t, uint64(i), zeroRoot, zeroRoot)
}
// Process one event: the drain should consume all 50 queued events.
first := makeHeadEvent(t, 0, zeroRoot, zeroRoot)
v.ProcessEvent(t.Context(), first)
// Channel should be empty.
select {
case e := <-eventsChannel:
t.Fatalf("Expected channel to be drained, but got event: %v", e)
default:
}
// Highest slot should be 50.
v.highestValidSlotLock.Lock()
got := v.highestValidSlot
v.highestValidSlotLock.Unlock()
require.Equal(t, primitives.Slot(50), got)
}
func TestReorgBurst_DedupEffective(t *testing.T) {
eventsChannel := make(chan *eventClient.Event, 128)
v := &validator{
eventsChannel: eventsChannel,
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
// Simulate reorg burst: alternating dependent roots queued up.
rootA := hexutil.Encode(bytesutil.PadTo([]byte{0xAA}, 32))
rootB := hexutil.Encode(bytesutil.PadTo([]byte{0xBB}, 32))
for i := 1; i <= 10; i++ {
root := rootA
if i%2 == 0 {
root = rootB
}
eventsChannel <- makeHeadEvent(t, uint64(i), root, root)
}
first := makeHeadEvent(t, 0, rootA, rootA)
v.ProcessEvent(t.Context(), first)
// All events should be drained.
select {
case e := <-eventsChannel:
t.Fatalf("Expected channel to be drained, but got event: %v", e)
default:
}
// Highest slot should be 10 (latest event from the reorg burst).
v.highestValidSlotLock.Lock()
got := v.highestValidSlot
v.highestValidSlotLock.Unlock()
require.Equal(t, primitives.Slot(10), got)
}
func TestFullPipeline_CleanShutdown(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 0; ; i++ {
data := fmt.Sprintf(`{"slot":"%d","previous_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","current_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000"}`, i)
_, err := fmt.Fprintf(w, "event: head\ndata: %s\n\n", data)
if err != nil {
return
}
flusher.Flush()
time.Sleep(50 * time.Millisecond)
}
})
server := httptest.NewServer(mux)
defer server.Close()
eventsChannel := make(chan *eventClient.Event, 64)
ctx, cancel := context.WithCancel(t.Context())
// Start SSE subscriber goroutine.
stream, err := eventClient.NewEventStream(ctx, http.DefaultClient, server.URL, []string{"head"})
require.NoError(t, err)
subscribeDone := make(chan struct{})
go func() {
stream.Subscribe(eventsChannel)
close(subscribeDone)
}()
// Start event processor goroutine.
v := &validator{
eventsChannel: eventsChannel,
slotFeed: new(event.Feed),
disableDutiesPolling: true,
}
processDone := make(chan struct{})
go func() {
for {
select {
case <-ctx.Done():
close(processDone)
return
case e := <-eventsChannel:
v.ProcessEvent(ctx, e)
}
}
}()
// Let the pipeline run for a bit to process some events.
time.Sleep(200 * time.Millisecond)
// Cancel context and verify all goroutines exit cleanly.
cancel()
select {
case <-subscribeDone:
case <-time.After(2 * time.Second):
t.Fatal("Subscribe goroutine did not exit within timeout")
}
select {
case <-processDone:
case <-time.After(2 * time.Second):
t.Fatal("Process goroutine did not exit within timeout")
}
// Verify some events were actually processed.
v.highestValidSlotLock.Lock()
got := v.highestValidSlot
v.highestValidSlotLock.Unlock()
require.NotEqual(t, primitives.Slot(0), got, "Expected some events to be processed")
}