Compare commits

..

2 Commits

Author       SHA1        Message                                                       Date
Manu NALEPA  f847487938  Add metrics tracking for FieldTrie node sizes and overrides  2026-03-27 15:07:00 +01:00
Manu NALEPA  90f8f9507e  Add metrics for consolidation requests.                      2026-03-26 08:07:37 +01:00
397 changed files with 4416 additions and 15900 deletions

View File

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.4
version: v1.7.0-alpha.2
style: full
specrefs:
@@ -23,8 +23,6 @@ exceptions:
- PTC_SIZE#gloas
constants:
# heze
- DOMAIN_INCLUSION_LIST_COMMITTEE#heze
# phase0
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
@@ -74,30 +72,10 @@ exceptions:
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
# heze
- HEZE_FORK_EPOCH#heze
- HEZE_FORK_VERSION#heze
- INCLUSION_LIST_SUBMISSION_DUE_BPS#heze
- MAX_BYTES_PER_INCLUSION_LIST#heze
- MAX_REQUEST_INCLUSION_LIST#heze
- PROPOSER_INCLUSION_LIST_CUTOFF_BPS#heze
- VIEW_FREEZE_CUTOFF_BPS#heze
ssz_objects:
# phase0
- Eth1Block#phase0
# fulu
- PartialDataColumnHeader#fulu
- PartialDataColumnPartsMetadata#fulu
- PartialDataColumnSidecar#fulu
# gloas
- PartialDataColumnHeader#gloas
# heze
- BeaconState#heze
- ExecutionPayloadBid#heze
- InclusionList#heze
- SignedExecutionPayloadBid#heze
- SignedInclusionList#heze
# capella
- LightClientBootstrap#capella
- LightClientFinalityUpdate#capella
@@ -127,7 +105,6 @@ exceptions:
dataclasses:
# phase0
- LatestMessage#phase0
- Seen#phase0
- Store#phase0
# altair
- LightClientStore#altair
@@ -144,11 +121,6 @@ exceptions:
- ExpectedWithdrawals#gloas
- LatestMessage#gloas
- Store#gloas
# heze
- GetInclusionListResponse#heze
- InclusionListStore#heze
- PayloadAttributes#heze
- Store#heze
functions:
# Functions implemented by KZG library for EIP-4844
@@ -205,22 +177,11 @@ exceptions:
- verify_cell_kzg_proof_batch_impl#fulu
# phase0
- compute_attestation_subnet_prefix_bits#phase0
- compute_min_epochs_for_block_requests#phase0
- compute_time_at_slot_ms#phase0
- is_not_from_future_slot#phase0
- is_within_slot_range#phase0
- update_proposer_boost_root#phase0
- is_proposer_equivocation#phase0
- record_block_timeliness#phase0
- compute_proposer_score#phase0
- get_attestation_score#phase0
- validate_attester_slashing_gossip#phase0
- validate_beacon_aggregate_and_proof_gossip#phase0
- validate_beacon_attestation_gossip#phase0
- validate_beacon_block_gossip#phase0
- validate_proposer_slashing_gossip#phase0
- validate_voluntary_exit_gossip#phase0
- calculate_committee_fraction#phase0
- compute_fork_version#phase0
- compute_pulled_up_tip#phase0
@@ -311,7 +272,6 @@ exceptions:
- upgrade_lc_store_to_capella#capella
- upgrade_lc_update_to_capella#capella
# deneb
- compute_max_request_blob_sidecars#deneb
- get_lc_execution_root#deneb
- is_valid_light_client_header#deneb
- prepare_execution_payload#deneb
@@ -322,7 +282,6 @@ exceptions:
- upgrade_lc_store_to_deneb#deneb
- upgrade_lc_update_to_deneb#deneb
# electra
- compute_max_request_blob_sidecars#electra
- compute_weak_subjectivity_period#electra
- current_sync_committee_gindex_at_slot#electra
- finalized_root_gindex_at_slot#electra
@@ -344,20 +303,12 @@ exceptions:
- upgrade_lc_store_to_electra#electra
- upgrade_lc_update_to_electra#electra
# fulu
- compute_max_request_data_column_sidecars#fulu
- compute_matrix#fulu
- verify_partial_data_column_header_inclusion_proof#fulu
- verify_partial_data_column_sidecar_kzg_proofs#fulu
- get_blob_parameters#fulu
- get_data_column_sidecars_from_block#fulu
- get_data_column_sidecars_from_column_sidecar#fulu
- recover_matrix#fulu
# gloas
- compute_ptc#gloas
- initialize_ptc_window#gloas
- is_payload_data_available#gloas
- is_pending_validator#gloas
- process_ptc_window#gloas
- compute_balance_weighted_acceptance#gloas
- compute_balance_weighted_selection#gloas
- compute_fork_version#gloas
@@ -455,28 +406,6 @@ exceptions:
- update_next_withdrawal_builder_index#gloas
- update_payload_expected_withdrawals#gloas
- update_proposer_boost_root#gloas
# heze
- compute_fork_version#heze
- get_forkchoice_store#heze
- get_inclusion_list_bits#heze
- get_inclusion_list_committee_assignment#heze
- get_inclusion_list_committee#heze
- get_inclusion_list_signature#heze
- get_inclusion_list_store#heze
- get_inclusion_list_submission_due_ms#heze
- get_inclusion_list_transactions#heze
- get_proposer_inclusion_list_cutoff_ms#heze
- get_view_freeze_cutoff_ms#heze
- is_inclusion_list_bits_inclusive#heze
- is_payload_inclusion_list_satisfied#heze
- is_valid_inclusion_list_signature#heze
- on_execution_payload#heze
- on_inclusion_list#heze
- prepare_execution_payload#heze
- process_inclusion_list#heze
- record_payload_inclusion_list_satisfaction#heze
- should_extend_payload#heze
- upgrade_to_heze#heze
presets:
# gloas
@@ -485,5 +414,3 @@ exceptions:
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
# heze
- INCLUSION_LIST_COMMITTEE_SIZE#heze

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.4"
consensus_spec_version = "v1.7.0-alpha.2"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-kNJxuhCtW4RbuS9nb4U6JXHlPgTSg6G3hWeHFVB9gZ4=",
"minimal": "sha256-U1tCkXxtdI6mkEdk80i8z9LU2hAyf7Ztz5SBYo5oMzo=",
"mainnet": "sha256-Ga8VDOcNhTTdXDj8tSyBVYrwya9f1HO94ehJ5vv91r4=",
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-XHu5K/65mue+5po63L9yGTFjGfU1RGj4S56dmcHc2Rs=",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
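A note on the hashes above: both the per-flavor "sha256-..." values and the integrity field are Subresource-Integrity-style digests, i.e. the base64 encoding of the raw SHA-256 of the downloaded archive. A minimal Go sketch for recomputing one locally (the file name is an assumption):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// Assumed local copy of one of the archives pinned above.
	data, err := os.ReadFile("mainnet.tar.gz")
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(data)
	// Prints a Bazel-style integrity string, e.g. "sha256-LnXyiLoJ...".
	fmt.Println("sha256-" + base64.StdEncoding.EncodeToString(sum[:]))
}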

View File

@@ -46,7 +46,7 @@ func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecke
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Warn("Switched to responsive beacon node")
}).Info("Switched to responsive beacon node")
}
return true
}

View File

@@ -3,15 +3,14 @@ package api
import "net/http"
const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
ExecutionPayloadIncludedHeader = "Eth-Execution-Payload-Included"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
)
// SetSSEHeaders sets the headers needed for a server-sent event response.
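A hedged sketch of what such a helper typically sets, using the constants above (the body of SetSSEHeaders is not shown in this diff, and the Cache-Control line is an assumption):

func setSSEHeaders(w http.ResponseWriter) {
	w.Header().Set("Content-Type", EventStreamMediaType) // "text/event-stream"
	w.Header().Set("Connection", KeepAlive)              // "keep-alive"
	w.Header().Set("Cache-Control", "no-cache")          // assumed; common for SSE
}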

View File

@@ -29,8 +29,6 @@ type Server struct {
startFailure error
}
const eventStreamPath = "/eth/v1/events"
// New returns a new instance of the Server.
func New(ctx context.Context, opts ...Option) (*Server, error) {
g := &Server{
@@ -50,17 +48,7 @@ func New(ctx context.Context, opts ...Option) (*Server, error) {
handler = middleware.MiddlewareChain(g.cfg.router, g.cfg.middlewares)
if g.cfg.timeout > 0*time.Second {
defaultReadHeaderTimeout = g.cfg.timeout
baseHandler := handler
timeoutHandler := http.TimeoutHandler(baseHandler, g.cfg.timeout, "request timed out")
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// SSE streams stay open indefinitely, so the global timeout wrapper must not
// cancel `/eth/v1/events` before the handler starts streaming responses.
if r.URL != nil && r.URL.Path == eventStreamPath {
baseHandler.ServeHTTP(w, r)
return
}
timeoutHandler.ServeHTTP(w, r)
})
handler = http.TimeoutHandler(handler, g.cfg.timeout, "request timed out")
}
g.server = &http.Server{
Addr: g.cfg.httpAddr,

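Context for the hunk above: http.TimeoutHandler replies with 503 and the "request timed out" body once the deadline elapses, which would sever long-lived /eth/v1/events streams; hence the path-based bypass. A self-contained sketch of that pattern (function name assumed; requires net/http and time):

func withTimeoutExceptSSE(next http.Handler, d time.Duration, ssePath string) http.Handler {
	timed := http.TimeoutHandler(next, d, "request timed out")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// SSE responses stream indefinitely and must never be timed out.
		if r.URL != nil && r.URL.Path == ssePath {
			next.ServeHTTP(w, r)
			return
		}
		timed.ServeHTTP(w, r)
	})
}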
View File

@@ -7,9 +7,7 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/testing/assert"
@@ -39,18 +37,10 @@ func TestServer_StartStop(t *testing.T) {
require.NoError(t, err)
g.Start()
require.Eventually(t, func() bool {
foundStart := false
for _, entry := range hook.AllEntries() {
if strings.Contains(entry.Message, "Starting HTTP server") {
foundStart = true
}
if strings.Contains(entry.Message, "Starting API middleware") {
return false
}
}
return foundStart
}, time.Second, 10*time.Millisecond)
go func() {
require.LogsContain(t, hook, "Starting HTTP server")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
@@ -78,51 +68,3 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}
func TestServer_TimeoutHandlerBypassesSSE(t *testing.T) {
handler := http.NewServeMux()
handler.HandleFunc(eventStreamPath, func(w http.ResponseWriter, _ *http.Request) {
time.Sleep(20 * time.Millisecond)
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte("stream-open"))
require.NoError(t, err)
})
g, err := New(t.Context(),
WithHTTPAddr("127.0.0.1:0"),
WithRouter(handler),
WithTimeout(5*time.Millisecond),
)
require.NoError(t, err)
req := httptest.NewRequest(http.MethodGet, eventStreamPath, nil)
writer := httptest.NewRecorder()
g.server.Handler.ServeHTTP(writer, req)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, "stream-open", writer.Body.String())
}
func TestServer_TimeoutHandlerStillAppliesToNonSSE(t *testing.T) {
handler := http.NewServeMux()
handler.HandleFunc("/foo", func(w http.ResponseWriter, _ *http.Request) {
time.Sleep(20 * time.Millisecond)
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte("ok"))
require.NoError(t, err)
})
g, err := New(t.Context(),
WithHTTPAddr("127.0.0.1:0"),
WithRouter(handler),
WithTimeout(5*time.Millisecond),
)
require.NoError(t, err)
req := httptest.NewRequest(http.MethodGet, "/foo", nil)
writer := httptest.NewRecorder()
g.server.Handler.ServeHTTP(writer, req)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "request timed out"))
}

View File

@@ -54,13 +54,11 @@ go_test(
name = "go_default_test",
srcs = [
"conversions_block_execution_test.go",
"conversions_block_gloas_test.go",
"conversions_test.go",
],
embed = [":go_default_library"],
deps = [
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -584,13 +584,6 @@ func (s *SignedBeaconBlockGloas) SigString() string {
return s.Signature
}
type BlockContentsGloas struct {
Block *BeaconBlockGloas `json:"block"`
ExecutionPayloadEnvelope *ExecutionPayloadEnvelope `json:"execution_payload_envelope"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type ExecutionPayloadEnvelope struct {
Payload *ExecutionPayloadDeneb `json:"payload"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`

View File

@@ -2983,19 +2983,6 @@ func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *Payload
}
}
func (b *SignedBeaconBlockGloas) ToGeneric() (*eth.GenericSignedBeaconBlock, error) {
if b == nil {
return nil, errNilValue
}
signed, err := b.ToConsensus()
if err != nil {
return nil, err
}
return &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Gloas{Gloas: signed},
}, nil
}
func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
if b == nil {
return nil, errNilValue
@@ -3150,14 +3137,6 @@ func (b *BeaconBlockBodyGloas) ToConsensus() (*eth.BeaconBlockBodyGloas, error)
}, nil
}
func (b *BeaconBlockGloas) ToGeneric() (*eth.GenericBeaconBlock, error) {
block, err := b.ToConsensus()
if err != nil {
return nil, errors.Wrap(err, "could not convert gloas block to consensus")
}
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_Gloas{Gloas: block}}, nil
}
func (b *SignedExecutionPayloadBid) ToConsensus() (*eth.SignedExecutionPayloadBid, error) {
if b == nil {
return nil, errNilValue
@@ -3305,113 +3284,25 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
}, nil
}
// ExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func ExecutionPayloadEnvelopeFromConsensus(e *eth.ExecutionPayloadEnvelope) (*ExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Payload)
// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
if err != nil {
return nil, err
}
var requests *ExecutionRequests
if e.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.ExecutionRequests)
}
return &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Slot),
StateRoot: hexutil.Encode(e.StateRoot),
}, nil
}
// SignedExecutionPayloadEnvelopeFromConsensus converts a signed proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
envelope, err := ExecutionPayloadEnvelopeFromConsensus(e.Message)
if err != nil {
return nil, err
if e.Message.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
}
return &SignedExecutionPayloadEnvelope{
Message: envelope,
Message: &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.Message.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.Message.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Message.Slot),
StateRoot: hexutil.Encode(e.Message.StateRoot),
},
Signature: hexutil.Encode(e.Signature),
}, nil
}
// BlockContentsGloasFromConsensus converts a proto Gloas block and envelope to the API struct.
func BlockContentsGloasFromConsensus(block *eth.BeaconBlockGloas, envelope *eth.ExecutionPayloadEnvelope) (*BlockContentsGloas, error) {
b, err := BeaconBlockGloasFromConsensus(block)
if err != nil {
return nil, err
}
env, err := ExecutionPayloadEnvelopeFromConsensus(envelope)
if err != nil {
return nil, err
}
return &BlockContentsGloas{
Block: b,
ExecutionPayloadEnvelope: env,
KzgProofs: []string{}, // TODO: populate from blobs bundle
Blobs: []string{}, // TODO: populate from blobs bundle
}, nil
}
// ToConsensus converts the API struct to a proto ExecutionPayloadEnvelope.
func (e *ExecutionPayloadEnvelope) ToConsensus() (*eth.ExecutionPayloadEnvelope, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadEnvelope")
}
payload, err := e.Payload.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Payload")
}
var requests *enginev1.ExecutionRequests
if e.ExecutionRequests != nil {
requests, err = e.ExecutionRequests.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionRequests")
}
}
builderIndex, err := strconv.ParseUint(e.BuilderIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "BuilderIndex")
}
beaconBlockRoot, err := bytesutil.DecodeHexWithLength(e.BeaconBlockRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
}
slot, err := strconv.ParseUint(e.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
stateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "StateRoot")
}
return &eth.ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: primitives.BuilderIndex(builderIndex),
BeaconBlockRoot: beaconBlockRoot,
Slot: primitives.Slot(slot),
StateRoot: stateRoot,
}, nil
}
// ToConsensus converts the API struct to a proto SignedExecutionPayloadEnvelope.
func (e *SignedExecutionPayloadEnvelope) ToConsensus() (*eth.SignedExecutionPayloadEnvelope, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "SignedExecutionPayloadEnvelope")
}
msg, err := e.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedExecutionPayloadEnvelope{
Message: msg,
Signature: sig,
}, nil
}
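A hedged usage sketch: the FromConsensus/ToConsensus pair above is designed to round-trip, so an envelope rendered for the REST API (roots hex-encoded, numeric fields as decimal strings) can be decoded back to proto:

// roundTripEnvelope is illustrative only; the helper name is an assumption.
func roundTripEnvelope(in *eth.SignedExecutionPayloadEnvelope) (*eth.SignedExecutionPayloadEnvelope, error) {
	apiEnv, err := SignedExecutionPayloadEnvelopeFromConsensus(in) // proto -> API struct
	if err != nil {
		return nil, err
	}
	return apiEnv.ToConsensus() // API struct -> proto, reversing the string encoding
}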

View File

@@ -1,67 +0,0 @@
package structs
import (
"testing"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
func testEnvelopeProto() *eth.ExecutionPayloadEnvelope {
return &eth.ExecutionPayloadEnvelope{
Payload: &enginev1.ExecutionPayloadDeneb{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BaseFeePerGas: fillByteSlice(32, 0x11),
BlockHash: fillByteSlice(common.HashLength, 0x22),
},
ExecutionRequests: &enginev1.ExecutionRequests{},
BuilderIndex: 7,
BeaconBlockRoot: fillByteSlice(32, 0x33),
Slot: 42,
StateRoot: fillByteSlice(32, 0x44),
}
}
func TestExecutionPayloadEnvelopeFromConsensus(t *testing.T) {
env := testEnvelopeProto()
result, err := ExecutionPayloadEnvelopeFromConsensus(env)
require.NoError(t, err)
require.NotNil(t, result.Payload)
require.Equal(t, hexutil.Encode(env.Payload.ParentHash), result.Payload.ParentHash)
require.Equal(t, "7", result.BuilderIndex)
require.Equal(t, hexutil.Encode(env.BeaconBlockRoot), result.BeaconBlockRoot)
require.Equal(t, "42", result.Slot)
require.Equal(t, hexutil.Encode(env.StateRoot), result.StateRoot)
require.NotNil(t, result.ExecutionRequests)
}
func TestExecutionPayloadEnvelopeFromConsensus_NilRequests(t *testing.T) {
env := testEnvelopeProto()
env.ExecutionRequests = nil
result, err := ExecutionPayloadEnvelopeFromConsensus(env)
require.NoError(t, err)
require.Equal(t, (*ExecutionRequests)(nil), result.ExecutionRequests)
}
func TestBlockContentsGloasFromConsensus(t *testing.T) {
block := util.NewBeaconBlockGloas().Block
env := testEnvelopeProto()
result, err := BlockContentsGloasFromConsensus(block, env)
require.NoError(t, err)
require.NotNil(t, result.Block)
require.NotNil(t, result.Block.Body)
require.NotNil(t, result.ExecutionPayloadEnvelope)
require.Equal(t, hexutil.Encode(env.BeaconBlockRoot), result.ExecutionPayloadEnvelope.BeaconBlockRoot)
require.Equal(t, 0, len(result.KzgProofs))
require.Equal(t, 0, len(result.Blobs))
}
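The tests above call fillByteSlice, which this hunk does not show; a minimal implementation consistent with its usage would be (assumed, defined elsewhere in the package):

// fillByteSlice returns an n-byte slice with every byte set to b,
// used above to build deterministic test fixtures.
func fillByteSlice(n int, b byte) []byte {
	s := make([]byte, n)
	for i := range s {
		s[i] = b
	}
	return s
}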

View File

@@ -5,7 +5,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
@@ -489,7 +488,7 @@ func TestBeaconStateGloasFromConsensus(t *testing.T) {
state.GenesisTime = 123
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
state.Slot = 5
state.ProposerLookahead = []primitives.ValidatorIndex{1, 2}
state.ProposerLookahead = []uint64{1, 2}
state.LatestExecutionPayloadBid = &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),

View File

@@ -53,8 +53,8 @@ type ChainReorgEvent struct {
Slot string `json:"slot"`
Depth string `json:"depth"`
OldHeadBlock string `json:"old_head_block"`
NewHeadBlock string `json:"new_head_block"`
OldHeadState string `json:"old_head_state"`
NewHeadBlock string `json:"old_head_state"`
OldHeadState string `json:"new_head_block"`
NewHeadState string `json:"new_head_state"`
Epoch string `json:"epoch"`
ExecutionOptimistic bool `json:"execution_optimistic"`

View File

@@ -95,14 +95,6 @@ type ProduceBlockV3Response struct {
Data json.RawMessage `json:"data"` // represents the block values based on the version
}
// ProduceBlockV4Response is a wrapper json object for the returned block from the ProduceBlockV4 endpoint
type ProduceBlockV4Response struct {
Version string `json:"version"`
ConsensusBlockValue string `json:"consensus_block_value"`
ExecutionPayloadIncluded bool `json:"execution_payload_included"`
Data json.RawMessage `json:"data"`
}
type GetLivenessResponse struct {
Data []*Liveness `json:"data"`
}
@@ -159,11 +151,6 @@ type ValidatorParticipation struct {
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
type GetValidatorExecutionPayloadEnvelopeResponse struct {
Version string `json:"version"`
Data *ExecutionPayloadEnvelope `json:"data"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`

View File

@@ -141,7 +141,6 @@ go_test(
"service_test.go",
"setup_forkchoice_test.go",
"setup_test.go",
"tracked_proposer_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -155,6 +154,7 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",

View File

@@ -59,7 +59,6 @@ type ForkchoiceFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
DependentRoot(primitives.Epoch) ([32]byte, error)
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -599,26 +598,3 @@ func (s *Service) inRegularSync() bool {
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
func (s *Service) ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
if slots.ToEpoch(dataSlot) < currentEpoch {
return false
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
if err != nil {
// This should not happen. The caller should have already checked the parent is in forkchoice.
return false
}
j := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
if j == nil {
return false
}
if slots.ToEpoch(parentSlot) >= j.Epoch {
return false
}
return s.cfg.ForkChoiceStore.IsCanonical(parentRoot)
}

View File

@@ -157,13 +157,6 @@ func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byt
if st.Version() < version.Bellatrix {
return nil, nil
}
if st.Version() >= version.Gloas {
h, err := st.LatestBlockHash()
if err != nil {
return nil, errors.Wrap(err, "could not get latest block hash")
}
return bytesutil.SafeCopyBytes(h[:]), nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, errors.Wrap(err, "could not get latest execution payload header")

View File

@@ -20,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
"google.golang.org/protobuf/proto"
)
@@ -736,73 +735,6 @@ func TestParentPayloadReady(t *testing.T) {
})
}
func TestService_ShouldIgnoreData(t *testing.T) {
service, tr := minimalTestService(t)
ctx := t.Context()
fcs := tr.fcs
zeroHash := params.BeaconConfig().ZeroHash
currentSlot := service.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
// Build a chain in forkchoice:
// genesis (slot 0) -> nodeA (slot 1, epoch 0) -> nodeB (slot slotsPerEpoch, epoch 1) -> nodeC (slot 2*slotsPerEpoch, epoch 2)
nodeARoot := [32]byte{1}
nodeBRoot := [32]byte{2}
nodeCRoot := [32]byte{3}
nodeASlot := primitives.Slot(1)
nodeBSlot := primitives.Slot(slotsPerEpoch) // epoch 1
nodeCSlot := primitives.Slot(2 * slotsPerEpoch) // epoch 2
stA, robA, err := prepareForkchoiceState(ctx, nodeASlot, nodeARoot, zeroHash, [32]byte{10}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stA, robA))
stB, robB, err := prepareForkchoiceState(ctx, nodeBSlot, nodeBRoot, nodeARoot, [32]byte{11}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stB, robB))
stC, robC, err := prepareForkchoiceState(ctx, nodeCSlot, nodeCRoot, nodeBRoot, [32]byte{12}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stC, robC))
// Set justified checkpoint to nodeB (epoch 1).
fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 1, Root: nodeBRoot}))
t.Run("past epoch data is not ignored", func(t *testing.T) {
pastSlot := primitives.Slot((currentEpoch - 1) * primitives.Epoch(slotsPerEpoch))
require.Equal(t, false, service.ShouldIgnoreData(nodeARoot, pastSlot))
})
t.Run("parent not in forkchoice", func(t *testing.T) {
unknownRoot := [32]byte{99}
require.Equal(t, false, service.ShouldIgnoreData(unknownRoot, currentSlot))
})
t.Run("parent epoch at or after justified", func(t *testing.T) {
// nodeB is at epoch 1, justified is epoch 1 => parentEpoch >= justified => false
require.Equal(t, false, service.ShouldIgnoreData(nodeBRoot, currentSlot))
})
t.Run("canonical parent before justified is ignored", func(t *testing.T) {
// nodeA is at epoch 0 < justified epoch 1, and is canonical => true
require.Equal(t, true, service.ShouldIgnoreData(nodeARoot, currentSlot))
})
t.Run("non-canonical parent before justified is not ignored", func(t *testing.T) {
// Insert a fork: nodeD at slot 2 (epoch 0) branching from nodeA, not on the canonical chain.
nodeDRoot := [32]byte{4}
stD, robD, err := prepareForkchoiceState(ctx, 2, nodeDRoot, nodeARoot, [32]byte{13}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stD, robD))
// nodeD is at epoch 0 < justified epoch 1, but not canonical => false
require.Equal(t, false, service.ShouldIgnoreData(nodeDRoot, currentSlot))
})
}
func Test_hashForGenesisRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
@@ -820,23 +752,3 @@ func Test_hashForGenesisRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{}, [32]byte(genRoot))
}
func Test_hashForGenesisRoot_Gloas(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
c := setupBeaconChain(t, beaconDB)
expectedHash := [32]byte{1, 2, 3, 4, 5}
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
LatestBlockHash: expectedHash[:],
})
require.NoError(t, err)
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
genesisRoot := [32]byte{0xaa}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
genHash, err := c.hashForGenesisBlock(ctx, genesisRoot)
require.NoError(t, err)
require.Equal(t, expectedHash, [32]byte(genHash))
}

View File

@@ -271,7 +271,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests, blk.Block().Slot())
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
newPayloadValidNodeCount.Inc()
return true, nil

View File

@@ -75,7 +75,7 @@ func prepareGloasForkchoiceState(
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
}
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
@@ -146,7 +146,7 @@ func testGloasState(t *testing.T, slot primitives.Slot, parentRoot [32]byte, blo
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
@@ -797,7 +797,7 @@ func TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead(t *testing.T) {
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
})
require.NoError(t, err2)
oldRoot := bytesutil.ToBytes32([]byte("oldroot1"))
@@ -874,7 +874,7 @@ func TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead(t *testing.T) {
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
})
require.NoError(t, err2)
oldRoot2 := bytesutil.ToBytes32([]byte("oldroot2"))

View File

@@ -74,6 +74,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
}
if len(eReqs.Consolidations) > 0 {
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
consolidationRequestCount.Add(float64(len(eReqs.Consolidations)))
}
if len(eReqs.Withdrawals) > 0 {
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))

View File

@@ -170,6 +170,10 @@ var (
Name: "txs_per_slot_count",
Help: "Count the number of txs per slot",
})
consolidationRequestCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "consolidation_request_count",
Help: "Count the number of consolidation requests",
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to postBlockProcess()",
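For context on the consolidation_request_count counter introduced by the "Add metrics for consolidation requests." commit: metrics created via promauto self-register with the default Prometheus registry, so exposing them only takes mounting the standard handler. A minimal standalone sketch:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var consolidationRequestCount = promauto.NewCounter(prometheus.CounterOpts{
	Name: "consolidation_request_count",
	Help: "Count the number of consolidation requests",
})

func main() {
	consolidationRequestCount.Add(3) // e.g. a block carrying three requests
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil) // port choice is arbitrary
}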

View File

@@ -96,15 +96,6 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
}
}
// WithProposerPreferencesCache sets the proposer preferences cache used to
// look up fee recipient and gas limit from Gloas gossip preferences.
func WithProposerPreferencesCache(c *cache.ProposerPreferencesCache) Option {
return func(s *Service) error {
s.cfg.ProposerPreferencesCache = c
return nil
}
}
// WithAttestationCache for attestation lifecycle after chain inclusion.
func WithAttestationCache(c *cache.AttestationCache) Option {
return func(s *Service) error {

View File

@@ -135,7 +135,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
var err error
preStateVersion := st.Version()
switch preStateVersion {
case version.Phase0, version.Altair, version.Gloas:
case version.Phase0, version.Altair:
default:
preStateHeader, err = st.LatestExecutionPayloadHeader()
if err != nil {
@@ -145,112 +145,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
// applyPayloadIfNeeded applies the parent block's execution payload envelope to
// preState when the current block's bid indicates it built on a full parent.
func (s *Service) applyPayloadIfNeeded(ctx context.Context, b interfaces.ReadOnlyBeaconBlock, parentRoot [32]byte, preState state.BeaconState) error {
if b.Version() < version.Gloas || parentRoot == [32]byte{} {
return nil
}
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return errors.Wrapf(err, "could not get parent block with root %#x", parentRoot)
}
if parentBlock.Version() < version.Gloas {
return nil
}
sb, err := b.Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get execution payload bid for block")
}
if sb == nil || sb.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Slot())
}
parentBid, err := parentBlock.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for parent block with root %#x", parentRoot)
}
if parentBid == nil || parentBid.Message == nil {
return fmt.Errorf("missing execution payload bid for parent block with root %#x", parentRoot)
}
if !bytes.Equal(sb.Message.ParentBlockHash, parentBid.Message.BlockHash) {
return nil
}
signedEnvelope, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
if err != nil {
return errors.Wrapf(err, "could not get execution payload envelope for parent block with root %#x", parentRoot)
}
if signedEnvelope == nil || signedEnvelope.Message == nil {
return nil
}
envelope, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(signedEnvelope.Message)
if err != nil {
return errors.Wrapf(err, "could not wrap blinded execution payload envelope for parent block with root %#x", parentRoot)
}
return gloas.ProcessBlindedExecutionPayload(ctx, preState, parentBlock.Block().StateRoot(), envelope)
}
// getBatchPrestate returns the pre-state to apply to the first beacon block in the batch, and returns true if it already applied the first envelope to that state
func (s *Service) getBatchPrestate(ctx context.Context, b consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope) (state.BeaconState, bool, error) {
if len(envelopes) == 0 || b.Version() < version.Gloas {
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
if err != nil {
return nil, false, errors.Wrap(err, "could not get block pre state")
}
return blockPreState, false, nil
}
parentRoot := b.Block().ParentRoot()
full, err := consensusblocks.BlockBuiltOnEnvelope(envelopes[0], b)
if err != nil {
return nil, false, errors.Wrap(err, "could not check if block builds on envelope")
}
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not get block pre state")
}
if !full {
return blockPreState, false, nil
}
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent block")
}
if s.cfg.BeaconDB.HasExecutionPayloadEnvelope(ctx, parentRoot) {
// The parent envelope was already saved by a previous batch but the
// replayed state may not include it (replay skips the last block's
// envelope). Load the blinded form from DB and apply it.
blindedEnv, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not load parent blinded envelope from DB")
}
wrappedEnv, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(blindedEnv.Message)
if err != nil {
return nil, false, errors.Wrap(err, "could not wrap blinded envelope")
}
if err := gloas.ProcessBlindedExecutionPayload(ctx, blockPreState, parentBlock.Block().StateRoot(), wrappedEnv); err != nil {
return nil, false, errors.Wrap(err, "could not apply parent blinded envelope from DB")
}
return blockPreState, true, nil
}
env, err := envelopes[0].Envelope()
if err != nil {
return nil, false, err
}
// notify the engine of the new envelope
if _, err := s.notifyNewEnvelope(ctx, blockPreState, env); err != nil {
return nil, false, err
}
if err := gloas.ProcessBlindedExecutionPayload(ctx, blockPreState, parentBlock.Block().StateRoot(), env); err != nil {
return nil, false, err
}
return blockPreState, true, nil
}
type versionAndHeader struct {
version int
header interfaces.ExecutionData
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -264,35 +159,16 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
b := blks[0].Block()
// Retrieve incoming block's pre state.
parentRoot := b.ParentRoot()
if err := s.verifyBlkPreState(ctx, parentRoot); err != nil {
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
return err
}
preState, applied, err := s.getBatchPrestate(ctx, blks[0], envelopes)
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
if err != nil {
return err
}
if preState == nil || preState.IsNil() {
return fmt.Errorf("nil pre state for slot %d", b.Slot())
}
var eidx int
var br [32]byte
sigSet := bls.NewSet()
if applied {
eidx = 1
envSigSet, err := gloas.ExecutionPayloadEnvelopeSignatureBatch(preState, envelopes[0])
if err != nil {
return err
}
sigSet.Join(envSigSet)
}
if eidx < len(envelopes) {
env, err := envelopes[eidx].Envelope()
if err != nil {
return err
}
br = env.BeaconBlockRoot()
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
@@ -301,6 +177,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
sigSet := bls.NewSet()
type versionAndHeader struct {
version int
header interfaces.ExecutionData
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
var set *bls.SignatureBatch
@@ -322,23 +203,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err != nil {
return invalidBlock{error: err}
}
if b.Root() == br && eidx < len(envelopes) {
envSigSet, err := gloas.ProcessExecutionPayloadWithDeferredSig(ctx, preState, b.Block().StateRoot(), envelopes[eidx])
if err != nil {
return err
}
sigSet.Join(envSigSet)
eidx++
if eidx < len(envelopes) {
nextEnv, err := envelopes[eidx].Envelope()
if err != nil {
return err
}
br = nextEnv.BeaconBlockRoot()
} else {
br = [32]byte{}
}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[b.Root()] = preState.Copy()
@@ -370,9 +234,58 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.New("batch block signature verification failed")
}
pendingNodes, isValidPayload, err := s.notifyEngineAndSaveData(ctx, blks, envelopes, avs, preVersionAndHeaders, postVersionAndHeaders, jCheckpoints, fCheckpoints)
if err != nil {
return err
// blocks have been verified, save them and call the engine
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
for i, b := range blks {
root := b.Root()
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return err
}
}
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{
Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i],
}
pendingNodes[i] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: root[:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
}
// Save boundary states that will be useful for forkchoice
for r, st := range boundaries {
@@ -387,15 +300,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
// Insert all nodes to forkchoice
if applied {
env, err := envelopes[0].Envelope()
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertPayload(env); err != nil {
return errors.Wrap(err, "could not insert first payload in batch to forkchoice")
}
}
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
@@ -408,102 +312,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) notifyEngineAndSaveData(
ctx context.Context,
blks []consensusblocks.ROBlock,
envelopes []interfaces.ROSignedExecutionPayloadEnvelope,
avs das.AvailabilityChecker,
preVersionAndHeaders []*versionAndHeader,
postVersionAndHeaders []*versionAndHeader,
jCheckpoints []*ethpb.Checkpoint,
fCheckpoints []*ethpb.Checkpoint,
) ([]*forkchoicetypes.BlockAndCheckpoints, bool, error) {
span := trace.FromContext(ctx)
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
var err error
envMap := make(map[[32]byte]int, len(envelopes))
for i, e := range envelopes {
env, err := e.Envelope()
if err != nil {
return nil, false, err
}
envMap[env.BeaconBlockRoot()] = i
}
for i, b := range blks {
root := b.Root()
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
if b.Version() < version.Gloas {
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return nil, false, s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return nil, false, err
}
}
} else {
idx, ok := envMap[root]
if ok {
env, err := envelopes[idx].Envelope()
if err != nil {
return nil, false, err
}
isValidPayload, err = s.notifyNewEnvelopeFromBlock(ctx, b, env)
if err != nil {
return nil, false, errors.Wrap(err, "could not notify new envelope from block")
}
args.HasPayload = true
bh := env.BlockHash()
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: bh[:],
}); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
}
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return nil, false, errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
pendingNodes[i] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: root[:],
}); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
}
return pendingNodes, isValidPayload, nil
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
@@ -686,7 +494,7 @@ func (s *Service) handleBlockPayloadAttestations(ctx context.Context, blk interf
if len(atts) == 0 {
return nil
}
committee, err := st.PayloadCommitteeReadOnly(blk.Slot() - 1)
committee, err := gloas.PayloadCommittee(ctx, st, blk.Slot()-1)
if err != nil {
return err
}
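One rule ties together applyPayloadIfNeeded above and the forkchoice backfill later in this diff: a Gloas block consumed its parent's payload if and only if its bid's ParentBlockHash equals the parent bid's BlockHash. A hedged standalone restatement (assumes the bytes import):

// builtOnFullParent restates the comparison used in applyPayloadIfNeeded
// and fillInForkChoiceMissingBlocks; both arguments come from the
// respective blocks' signed execution payload bids.
func builtOnFullParent(childBidParentBlockHash, parentBidBlockHash []byte) bool {
	return bytes.Equal(childBidParentBlockHash, parentBidBlockHash)
}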

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"slices"
@@ -23,7 +22,6 @@ import (
mathutil "github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
@@ -389,7 +387,6 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
return err
}
root := signed.Block().ParentRoot()
child := signed
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
@@ -403,33 +400,10 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
if err != nil {
return err
}
hasPayload := false
if roblock.Version() >= version.Gloas {
sbid, err := child.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", child.Block().Slot())
}
if sbid == nil || sbid.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", child.Block().Slot())
}
parentBid, err := b.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", b.Block().Slot())
}
if parentBid == nil || parentBid.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Block().Slot())
}
if bytes.Equal(sbid.Message.ParentBlockHash, parentBid.Message.BlockHash) {
hasPayload = true
}
}
root = b.Block().ParentRoot()
child = b
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint,
HasPayload: hasPayload,
}
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
if len(pendingNodes) == 0 {

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
@@ -163,7 +164,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, err)
blks = append(blks, rwsb)
}
err := service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{})
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
@@ -193,7 +194,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
blks = append(blks, rwsb)
}
require.NoError(t, service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{}))
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
@@ -2073,7 +2074,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{}))
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
@@ -3555,7 +3556,7 @@ func TestHandleBlockPayloadAttestations(t *testing.T) {
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

View File

@@ -41,7 +41,7 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -373,7 +373,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -381,7 +381,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
defer s.cfg.ForkChoiceStore.Unlock()
// Apply the state transition to the newly received blocks in the batch, one by one.
if err := s.onBlockBatch(ctx, blocks, envelopes, avs); err != nil {
if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
@@ -421,15 +421,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
for _, e := range envelopes {
protoEnv, ok := e.Proto().(*ethpb.SignedExecutionPayloadEnvelope)
if !ok {
return errors.New("could not type assert signed envelope to proto")
}
if err := s.cfg.BeaconDB.SaveExecutionPayloadEnvelope(ctx, protoEnv); err != nil {
return errors.Wrap(err, "could not save execution payload envelope")
}
}
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore

View File

@@ -281,7 +281,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{})
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -203,50 +202,6 @@ func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, envelope inter
return preState, nil
}
func (s *Service) callNewPayload(
ctx context.Context,
payload interfaces.ExecutionData,
versionedHashes []common.Hash,
parentRoot common.Hash,
requests *enginev1.ExecutionRequests,
slot primitives.Slot,
) (bool, error) {
_, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests, slot)
if err == nil {
return true, nil
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
log.WithFields(logrus.Fields{
"slot": slot,
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic envelope")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
return false, invalidBlock{error: ErrInvalidPayload}
}
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
func (s *Service) notifyNewEnvelopeFromBlock(ctx context.Context, b blocks.ROBlock, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelopeFromBlock")
defer span.End()
payload, err := envelope.Execution()
if err != nil {
return false, errors.Wrap(err, "could not get execution payload from envelope")
}
sbid, err := b.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return false, errors.Wrap(err, "could not get signed execution payload bid from block")
}
versionedHashes := make([]common.Hash, len(sbid.Message.BlobKzgCommitments))
for i, c := range sbid.Message.BlobKzgCommitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
}
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(b.Block().ParentRoot()), envelope.ExecutionRequests(), envelope.Slot())
}
// The returned boolean indicates whether the payload was valid or was accepted as syncing (optimistic).
func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelope")
@@ -256,6 +211,7 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
if err != nil {
return false, errors.Wrap(err, "could not get execution payload from envelope")
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return false, errors.Wrap(err, "could not get latest execution payload bid")
@@ -265,7 +221,25 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
for i, c := range commitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
}
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot)), envelope.ExecutionRequests(), envelope.Slot())
parentRoot := common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot))
requests := envelope.ExecutionRequests()
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
if err == nil {
return true, nil
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
log.WithFields(logrus.Fields{
"slot": envelope.Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic envelope")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
return false, invalidBlock{error: ErrInvalidPayload}
}
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
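The (bool, error) contract of notifyNewEnvelope and callNewPayload encodes three engine outcomes: (true, nil) for VALID, (false, nil) for ACCEPTED/SYNCING, and (false, err) for INVALID or an undefined engine error. A minimal sketch of a caller branching on it; markOptimistic and importFull are hypothetical helpers, not part of this diff:
// onEnvelope sketches how a caller consumes the tri-state result.
func onEnvelope(ctx context.Context, s *Service, st state.BeaconState, env interfaces.ROExecutionPayloadEnvelope) error {
	valid, err := s.notifyNewEnvelope(ctx, st, env)
	if err != nil {
		// Covers invalidBlock{ErrInvalidPayload} and undefined engine errors.
		return err
	}
	if !valid {
		// Engine answered ACCEPTED/SYNCING: import optimistically.
		return markOptimistic(ctx, env)
	}
	return importFull(ctx, env)
}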
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {

View File

@@ -3,6 +3,7 @@ package blockchain
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -50,7 +51,7 @@ func TestReceivePayloadAttestationMessage_ValidatorNotInPTC(t *testing.T) {
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
// Pick a validator index not in the PTC.
@@ -99,7 +100,7 @@ func TestReceivePayloadAttestationMessage_OK(t *testing.T) {
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

View File

@@ -73,30 +73,29 @@ type Service struct {
// config options for the service.
type config struct {
BeaconBlockBuf int
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
ProposerPreferencesCache *cache.ProposerPreferencesCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher execution.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
BeaconBlockBuf int
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher execution.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
}
// Checker is an interface used to determine if a node is in initial sync

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -78,12 +77,8 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
resolveChainPayloadStatus(chain)
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.markFinalizedRootFull(chain, fRoot); err != nil {
log.WithError(err).Error("Could not mark finalized root as full in forkchoice")
}
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
}
@@ -150,68 +145,6 @@ func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
return nil
}
// resolveChainPayloadStatus determines which blocks in the chain had their
// execution payloads delivered by comparing consecutive blocks' bids. For each
// pair of blocks (chain[i], chain[i+1]), if the next block's bid parentBlockHash
// equals the current block's bid blockHash, the current block's payload was
// delivered.
func resolveChainPayloadStatus(chain []*forkchoicetypes.BlockAndCheckpoints) {
for i := 0; i < len(chain)-1; i++ {
curr := chain[i].Block.Block()
next := chain[i+1].Block.Block()
if curr.Version() < version.Gloas || next.Version() < version.Gloas {
continue
}
currBid, err := curr.Body().SignedExecutionPayloadBid()
if err != nil || currBid == nil || currBid.Message == nil {
continue
}
nextBid, err := next.Body().SignedExecutionPayloadBid()
if err != nil || nextBid == nil || nextBid.Message == nil {
continue
}
if bytes.Equal(nextBid.Message.ParentBlockHash, currBid.Message.BlockHash) {
chain[i].HasPayload = true
}
}
}
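The hash-linking rule this helper encodes, shown on plain byte slices: block i's payload counts as delivered exactly when block i+1's bid builds on block i's bid block hash. A runnable toy with no Prysm types:
package main
import (
	"bytes"
	"fmt"
)
type bid struct{ parentBlockHash, blockHash []byte }
func main() {
	// chain[1] builds on chain[0]'s payload; chain[2] skips chain[1]'s
	// payload (its parent hash still points at 0x0a, so 0x0b was withheld).
	chain := []bid{
		{parentBlockHash: []byte{0x00}, blockHash: []byte{0x0a}},
		{parentBlockHash: []byte{0x0a}, blockHash: []byte{0x0b}},
		{parentBlockHash: []byte{0x0a}, blockHash: []byte{0x0c}},
	}
	for i := 0; i < len(chain)-1; i++ {
		delivered := bytes.Equal(chain[i+1].parentBlockHash, chain[i].blockHash)
		fmt.Printf("block %d payload delivered: %v\n", i, delivered) // true, false
	}
}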
// markFinalizedRootFull checks whether the finalized root block's execution
// payload was delivered by inspecting the first block in the chain. If the first
// block's bid parentBlockHash equals the finalized block's bid blockHash, the
// finalized block's payload was delivered and a full node must be created in
// forkchoice. The caller must hold the forkchoice lock.
func (s *Service) markFinalizedRootFull(chain []*forkchoicetypes.BlockAndCheckpoints, fRoot [32]byte) error {
if len(chain) == 0 {
return nil
}
firstBlock := chain[0].Block.Block()
if firstBlock.Version() < version.Gloas {
return nil
}
firstBid, err := firstBlock.Body().SignedExecutionPayloadBid()
if err != nil || firstBid == nil || firstBid.Message == nil {
return nil
}
fBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if fBlock.Block().Version() < version.Gloas {
return nil
}
fBid, err := fBlock.Block().Body().SignedExecutionPayloadBid()
if err != nil || fBid == nil || fBid.Message == nil {
return nil
}
if !bytes.Equal(firstBid.Message.ParentBlockHash, fBid.Message.BlockHash) {
return nil
}
// The finalized block's payload was delivered. Create the full node.
s.cfg.ForkChoiceStore.MarkFullNode(fRoot)
return nil
}
func (s *Service) setupForkchoiceCheckpoints() error {
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {

View File

@@ -94,11 +94,6 @@ func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []bl
return nil
}
func (mb *mockBroadcaster) BroadcastForEpoch(_ context.Context, _ proto.Message, _ primitives.Epoch) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -77,7 +77,6 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
DependentRootCB func([32]byte, primitives.Epoch) ([32]byte, error)
MockCanonicalRoots map[primitives.Slot][32]byte
MockCanonicalFull map[primitives.Slot]bool
MockPayloadContentLookup map[[32]byte][32]byte
@@ -282,7 +281,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ []interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityChecker) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}
@@ -758,11 +757,6 @@ func (s *ChainService) HasFullNode(root [32]byte) bool {
return false
}
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
func (s *ChainService) ShouldIgnoreData(_ [32]byte, _ primitives.Slot) bool {
return false
}
// PayloadContentLookup mocks the same method in the chain service.
func (s *ChainService) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
if s.ForkChoiceStore != nil {
@@ -868,10 +862,7 @@ func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
if c.DependentRootCB != nil {
return c.DependentRootCB(root, epoch)
}
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
}

View File

@@ -8,35 +8,11 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// proposerPreference returns a TrackedValidator from the ProposerPreferencesCache
// if a preference exists for the given slot.
func (s *Service) proposerPreference(slot primitives.Slot) (cache.TrackedValidator, bool) {
if s.cfg.ProposerPreferencesCache == nil {
return cache.TrackedValidator{}, false
}
pref, ok := s.cfg.ProposerPreferencesCache.Get(slot)
if !ok {
return cache.TrackedValidator{}, false
}
var feeRecipient primitives.ExecutionAddress
copy(feeRecipient[:], pref.FeeRecipient)
return cache.TrackedValidator{Active: true, FeeRecipient: feeRecipient, GasLimit: pref.GasLimit}, true
}
// trackedProposer returns whether the beacon node was informed, via the
// validators/prepare_proposer endpoint, of the proposer at the given slot.
// It only returns true if the tracked proposer is present and active.
//
// When PrepareAllPayloads is enabled, the node prepares payloads for every
// slot. After the Gloas fork, proposers broadcast their preferences (fee
// recipient, gas limit) via gossip into the ProposerPreferencesCache. When
// available, these preferences supply the fee recipient; otherwise the
// default (burn address) is used.
func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
if features.Get().PrepareAllPayloads {
if val, ok := s.proposerPreference(slot); ok {
return val, true
}
return cache.TrackedValidator{Active: true}, true
}
id, err := helpers.BeaconProposerIndexAtSlot(s.ctx, st, slot)
@@ -47,8 +23,5 @@ func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.
if !ok {
return cache.TrackedValidator{}, false
}
if pref, ok := s.proposerPreference(slot); ok {
return pref, true
}
return val, val.Active
}
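Restating the precedence this path implements (a summary, not the actual call graph): a gossiped proposer preference wins, then a prepare_proposer registration, then the burn-address default when PrepareAllPayloads is set. A stand-in sketch with plain types:
// feeRecipientFor restates the lookup precedence; types are stand-ins, not Prysm's.
func feeRecipientFor(pref, tracked *[20]byte) [20]byte {
	if pref != nil {
		return *pref // gossiped proposer preference wins
	}
	if tracked != nil {
		return *tracked // validators/prepare_proposer registration
	}
	return [20]byte{} // default: burn address (zero fee recipient)
}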

View File

@@ -1,83 +0,0 @@
package blockchain
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common"
)
func TestTrackedProposer_NotTracked(t *testing.T) {
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
_, ok := service.trackedProposer(st, 0)
require.Equal(t, false, ok)
}
func TestTrackedProposer_Tracked(t *testing.T) {
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
addr := common.HexToAddress("0x1234")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(addr), Index: 0})
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, primitives.ExecutionAddress(addr), val.FeeRecipient)
}
func TestTrackedProposer_PrepareAllPayloads_Default(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{PrepareAllPayloads: true})
defer resetCfg()
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, true, val.Active)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(val.FeeRecipient[:]).String())
}
func TestTrackedProposer_PrepareAllPayloads_WithProposerPreference(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{PrepareAllPayloads: true})
defer resetCfg()
prefCache := cache.NewProposerPreferencesCache()
service, _ := minimalTestService(t,
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithProposerPreferencesCache(prefCache),
)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
addr := common.HexToAddress("0xabcd")
prefCache.Add(0, addr.Bytes(), 42_000_000)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, true, val.Active)
require.Equal(t, primitives.ExecutionAddress(addr), val.FeeRecipient)
require.Equal(t, uint64(42_000_000), val.GasLimit)
}
func TestTrackedProposer_TrackedWithProposerPreferenceOverride(t *testing.T) {
prefCache := cache.NewProposerPreferencesCache()
service, _ := minimalTestService(t,
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithProposerPreferencesCache(prefCache),
)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
trackedAddr := common.HexToAddress("0x1111")
prefAddr := common.HexToAddress("0x2222")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(trackedAddr), Index: 0})
prefCache.Add(0, prefAddr.Bytes(), 50_000_000)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
// Proposer preference overrides tracked validator.
require.Equal(t, primitives.ExecutionAddress(prefAddr), val.FeeRecipient)
require.Equal(t, uint64(50_000_000), val.GasLimit)
}

View File

@@ -21,7 +21,6 @@ type (
Active bool
FeeRecipient primitives.ExecutionAddress
Index primitives.ValidatorIndex
GasLimit uint64
}
TrackedValidatorsCache struct {

View File

@@ -192,47 +192,11 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateGloas:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{
Block: gloasGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}
func gloasGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockGloas {
return &ethpb.BeaconBlockGloas{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyGloas{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
SignedExecutionPayloadBid: &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: make([][]byte, 0),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
},
}
}
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],

View File

@@ -51,10 +51,8 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
if effectiveBal != val.EffectiveBalance() {
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
}
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
}
return newVal, nil
}
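The hysteresis rule above in isolation: the effective balance moves only when the actual balance leaves the band [effective - downwardThreshold, effective + upwardThreshold], and then snaps down to an increment multiple capped at the limit. A standalone toy with Gwei amounts as plain uint64s:
// nextEffectiveBalance applies the hysteresis rule described above.
func nextEffectiveBalance(balance, eff, down, up, increment, limit uint64) uint64 {
	if balance+down < eff || eff+up < balance {
		e := balance - balance%increment // snap down to an increment multiple
		if e > limit {
			e = limit // cap at the effective balance limit
		}
		return e
	}
	return eff // within the band: no change
}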

View File

@@ -35,7 +35,6 @@ go_library(
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",

View File

@@ -29,7 +29,7 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// processDepositRequest processes the specific deposit request
//
// <spec fn="process_deposit_request" fork="gloas" hash="0e8b94ab">
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
// # [New in Gloas:EIP7732]
// builder_pubkeys = [b.pubkey for b in state.builders]
@@ -40,11 +40,8 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// # already exists with this pubkey, apply the deposit to their balance
// is_builder = deposit_request.pubkey in builder_pubkeys
// is_validator = deposit_request.pubkey in validator_pubkeys
// if is_builder or (
// is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// and not is_validator
// and not is_pending_validator(state, deposit_request.pubkey)
// ):
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// if is_builder or (is_builder_prefix and not is_validator):
// # Apply builder deposits immediately
// apply_deposit_for_builder(
// state,
@@ -68,27 +65,37 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// )
// </spec>
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
var err error
defer func() {
if err == nil {
builderDepositsProcessedTotal.Inc()
}
}()
if request == nil {
return errors.New("nil deposit request")
err = errors.New("nil deposit request")
return err
}
applied, err := applyBuilderDepositRequest(beaconState, request)
var applied bool
applied, err = applyBuilderDepositRequest(beaconState, request)
if err != nil {
return errors.Wrap(err, "could not apply builder deposit")
err = errors.Wrap(err, "could not apply builder deposit")
return err
}
if applied {
builderDepositsProcessedTotal.Inc()
return nil
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
if err = beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: request.Pubkey,
WithdrawalCredentials: request.WithdrawalCredentials,
Amount: request.Amount,
Signature: request.Signature,
Slot: beaconState.Slot(),
}); err != nil {
return errors.Wrap(err, "could not append deposit request")
err = errors.Wrap(err, "could not append deposit request")
return err
}
return nil
}
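The routing above reduces to one predicate, per the alpha.2 spec text quoted in the comment: apply the deposit to a builder immediately iff the pubkey is already a builder, or the credentials carry the builder prefix and the pubkey is not a known validator; everything else is queued as a PendingDeposit. A toy restatement:
// routeDeposit captures the alpha.2 routing shown in the spec comment.
func routeDeposit(isBuilder, isValidator, isBuilderPrefix bool) string {
	if isBuilder || (isBuilderPrefix && !isValidator) {
		return "apply_deposit_for_builder" // credited immediately
	}
	return "append_pending_deposit" // queued for later processing
}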
@@ -122,7 +129,13 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
}
pubkey := bytesutil.ToBytes48(request.Pubkey)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
if !isBuilder && (!isBuilderPrefix || isValidator) {
return false, nil
}
if isBuilder {
if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
return false, err
@@ -130,20 +143,6 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
return true, nil
}
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
if !isBuilderPrefix || isValidator {
return false, nil
}
isPending, err := beaconState.IsPendingValidator(request.Pubkey)
if err != nil {
return false, err
}
if isPending {
return false, nil
}
if err := applyDepositForNewBuilder(
beaconState,
request.Pubkey,

View File

@@ -91,33 +91,6 @@ func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
require.Equal(t, 0, len(pending))
}
func TestProcessDepositRequest_BuilderDepositWithExistingPendingDepositStaysPending(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
validatorCred := validatorWithdrawalCredentials()
builderCred := builderWithdrawalCredentials()
existingPending := stateTesting.GeneratePendingDeposit(t, sk, 1234, validatorCred, 0)
req := depositRequestFromPending(stateTesting.GeneratePendingDeposit(t, sk, 200, builderCred, 1), 9)
st := newGloasState(t, nil, nil)
require.NoError(t, st.SetPendingDeposits([]*ethpb.PendingDeposit{existingPending}))
err = processDepositRequest(st, req)
require.NoError(t, err)
_, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
require.Equal(t, false, ok)
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pending))
require.DeepEqual(t, existingPending.PublicKey, pending[0].PublicKey)
require.DeepEqual(t, req.Pubkey, pending[1].PublicKey)
require.DeepEqual(t, req.WithdrawalCredentials, pending[1].WithdrawalCredentials)
require.Equal(t, req.Amount, pending[1].Amount)
}
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -17,8 +17,7 @@ import (
"github.com/pkg/errors"
)
// ProcessExecutionPayload is the gossip entry point: verify signature, validate
// consistency, apply state mutations, and verify the post-payload state root.
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
//
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
// def process_execution_payload(
@@ -109,7 +108,7 @@ func ProcessExecutionPayload(
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
if err := VerifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
return errors.Wrap(err, "signature verification failed")
}
@@ -118,132 +117,29 @@ func ProcessExecutionPayload(
return errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
return err
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return err
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get hash tree root")
}
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
return err
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return verifyPostStateRoot(ctx, st, envelope)
return nil
}
// ProcessExecutionPayloadWithDeferredSig is the init-sync entry point: extract the
// signature for deferred verification, validate consistency, apply state
// mutations, and verify the post-payload state root. The caller provides the
// previousStateRoot to avoid recomputing it.
func ProcessExecutionPayloadWithDeferredSig(
ctx context.Context,
st state.BeaconState,
previousStateRoot [32]byte,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) (*bls.SignatureBatch, error) {
sigBatch, err := ExecutionPayloadEnvelopeSignatureBatch(st, signedEnvelope)
if err != nil {
return nil, errors.Wrap(err, "could not extract envelope signature batch")
}
envelope, err := signedEnvelope.Envelope()
if err != nil {
return nil, errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
return nil, errors.Wrap(err, "could not set latest block header state root")
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return nil, err
}
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
return nil, err
}
if err := verifyPostStateRoot(ctx, st, envelope); err != nil {
return nil, err
}
return sigBatch, nil
}
// ProcessBlindedExecutionPayload is the replay/stategen entry
// point: patch the block header, do minimal bid consistency checks, and apply
// state mutations. No payload data is available — only the blinded envelope.
// A nil envelope is a no-op (the payload was not delivered for that slot).
func ProcessBlindedExecutionPayload(
ctx context.Context,
st state.BeaconState,
previousStateRoot [32]byte,
envelope interfaces.ROBlindedExecutionPayloadEnvelope,
) error {
if envelope == nil {
return nil
}
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
return errors.Wrap(err, "could not set latest block header state root")
}
if envelope.Slot() != st.Slot() {
return errors.Errorf("blinded envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if latestBid == nil {
return errors.New("latest execution payload bid is nil")
}
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
return errors.Errorf(
"blinded envelope builder index does not match committed bid builder index: envelope=%d, bid=%d",
envelope.BuilderIndex(),
latestBid.BuilderIndex(),
)
}
bidBlockHash := latestBid.BlockHash()
envelopeBlockHash := envelope.BlockHash()
if bidBlockHash != envelopeBlockHash {
return errors.Errorf(
"blinded envelope block hash does not match committed bid block hash: envelope=%#x, bid=%#x",
envelopeBlockHash,
bidBlockHash,
)
}
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelopeBlockHash)
}
// ApplyExecutionPayload patches the block header state root, validates
// consistency, and applies state mutations. No signature or post-state-root
// verification is performed. Used by the proposer path to compute the
// post-payload state root for the envelope.
// ApplyExecutionPayload applies the execution payload envelope to the state and performs the same
// consistency checks as the full processing path. This keeps the post-payload state root computation
// on a shared code path, even though some bid/payload checks are not strictly required for the root itself.
func ApplyExecutionPayload(
ctx context.Context,
st state.BeaconState,
envelope interfaces.ROExecutionPayloadEnvelope,
) error {
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
return err
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return err
}
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash())
}
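A sketch of the proposer-side use the comment describes: apply the envelope to a copy of the state and hash the result to obtain the envelope's post-payload state root. computePostPayloadRoot is a hypothetical helper; Copy and HashTreeRoot follow the state interface used elsewhere in this diff:
// computePostPayloadRoot applies the envelope to a state copy and hashes it.
func computePostPayloadRoot(ctx context.Context, st state.BeaconState, env interfaces.ROExecutionPayloadEnvelope) ([32]byte, error) {
	copied := st.Copy()
	if err := ApplyExecutionPayload(ctx, copied, env); err != nil {
		return [32]byte{}, err
	}
	return copied.HashTreeRoot(ctx)
}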
func setLatestBlockHeaderStateRoot(st state.BeaconState, root [32]byte) error {
latestHeader := st.LatestBlockHeader()
latestHeader.StateRoot = root[:]
return st.SetLatestBlockHeader(latestHeader)
}
// cacheLatestBlockHeaderStateRoot fills in the state root on the latest block
// header if it hasn't been set yet (the spec's "cache latest block header
// state root" step).
func cacheLatestBlockHeaderStateRoot(ctx context.Context, st state.BeaconState) error {
latestHeader := st.LatestBlockHeader()
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
previousStateRoot, err := st.HashTreeRoot(ctx)
@@ -255,13 +151,7 @@ func cacheLatestBlockHeaderStateRoot(ctx context.Context, st state.BeaconState)
return errors.Wrap(err, "could not set latest block header")
}
}
return nil
}
// validatePayloadConsistency checks that the envelope and payload are consistent
// with the beacon block header, the committed bid, and the current state.
func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
latestHeader := st.LatestBlockHeader()
blockHeaderRoot, err := latestHeader.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute block header root")
@@ -300,6 +190,7 @@ func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExec
if err != nil {
return errors.Wrap(err, "could not get withdrawals from payload")
}
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
if err != nil {
return errors.Wrap(err, "could not validate payload withdrawals")
@@ -334,26 +225,14 @@ func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExec
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
}
if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
return err
}
return nil
}
// verifyPostStateRoot checks that the post-payload state root matches the
// envelope's declared state root.
func verifyPostStateRoot(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not compute post-envelope state root")
}
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return nil
}
// applyExecutionPayloadStateMutations applies the state-changing operations
// from an execution payload: process execution requests, queue builder payment,
// set execution payload availability, and update the latest block hash.
func applyExecutionPayloadStateMutations(
func ApplyExecutionPayloadStateMutations(
ctx context.Context,
st state.BeaconState,
executionRequests *enginev1.ExecutionRequests,
@@ -378,107 +257,6 @@ func applyExecutionPayloadStateMutations(
return nil
}
// ExecutionPayloadEnvelopeSignatureBatch extracts the BLS signature from a signed execution payload
// envelope as a SignatureBatch for deferred batch verification.
func ExecutionPayloadEnvelopeSignatureBatch(
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) (*bls.SignatureBatch, error) {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return nil, fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return nil, err
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return nil, fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return nil, fmt.Errorf("failed to compute signing root: %w", err)
}
signatureBytes := signedEnvelope.Signature()
return &bls.SignatureBatch{
Signatures: [][]byte{signatureBytes[:]},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{signingRoot},
Descriptions: []string{"execution payload envelope signature"},
}, nil
}
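Deferred verification as intended here: collect one SignatureBatch per envelope, join them, and run a single aggregate check. bls.NewSet and Join are assumptions based on their use elsewhere in Prysm; Verify's (bool, error) shape matches the test further below:
// verifyEnvelopeBatch sketches one aggregate check for many envelopes.
func verifyEnvelopeBatch(st state.BeaconState, envs []interfaces.ROSignedExecutionPayloadEnvelope) (bool, error) {
	set := bls.NewSet() // assumed empty-batch constructor
	for _, se := range envs {
		b, err := ExecutionPayloadEnvelopeSignatureBatch(st, se)
		if err != nil {
			return false, err
		}
		set = set.Join(b) // assumed append-and-return semantics
	}
	// One aggregate pairing check instead of one per envelope.
	return set.Verify()
}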
// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
//
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
// def verify_execution_payload_envelope_signature(
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
// ) -> bool:
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// validator_index = state.latest_block_header.proposer_index
// pubkey = state.validators[validator_index].pubkey
// else:
// pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
// </spec>
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
return proposerPublicKey(st)
@@ -515,6 +293,10 @@ func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex)
}
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
return errors.Wrap(err, "could not process deposit requests")
@@ -531,3 +313,65 @@ func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *en
}
return nil
}
// VerifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
// def verify_execution_payload_envelope_signature(
//
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
//
// ) -> bool:
//
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// validator_index = state.latest_block_header.proposer_index
// pubkey = state.validators[validator_index].pubkey
// else:
// pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
//
// </spec>
func VerifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}

View File

@@ -19,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
@@ -81,7 +80,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := st.PayloadCommitteeReadOnly(att.Data.Slot)
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
@@ -100,10 +99,10 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}, nil
}
// computePTC computes the payload timeliness committee for a given slot.
// PayloadCommittee returns the payload timeliness committee of the given state for the given slot.
//
// <spec fn="compute_ptc" fork="gloas" hash="0f323552">
// def compute_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
@@ -119,7 +118,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func computePTC(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
@@ -167,7 +166,7 @@ func PayloadCommitteeIndex(
slot primitives.Slot,
validatorIndex primitives.ValidatorIndex,
) (uint64, error) {
ptc, err := st.PayloadCommitteeReadOnly(slot)
ptc, err := PayloadCommittee(ctx, st, slot)
if err != nil {
return 0, err
}
@@ -343,43 +342,3 @@ func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus
}
return nil
}
// ProcessPTCWindow rotates the cached PTC window at epoch boundaries by computing
// PTC assignments for the new lookahead epoch and shifting the window.
//
// <spec fn="process_ptc_window" fork="gloas" hash="7be3d509">
// def process_ptc_window(state: BeaconState) -> None:
// """
// Update the cached PTC window.
// """
// # Shift all epochs forward by one
// state.ptc_window[: len(state.ptc_window) - SLOTS_PER_EPOCH] = state.ptc_window[SLOTS_PER_EPOCH:]
// # Fill in the last epoch
// next_epoch = Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1)
// start_slot = compute_start_slot_at_epoch(next_epoch)
// state.ptc_window[len(state.ptc_window) - SLOTS_PER_EPOCH :] = [
// compute_ptc(state, Slot(slot)) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH)
// ]
// </spec>
func ProcessPTCWindow(ctx context.Context, st state.BeaconState) error {
_, span := trace.StartSpan(ctx, "gloas.ProcessPTCWindow")
defer span.End()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
lastEpoch := slots.ToEpoch(st.Slot()) + params.BeaconConfig().MinSeedLookahead + 1
startSlot, err := slots.EpochStart(lastEpoch)
if err != nil {
return err
}
newSlots := make([]*eth.PTCs, slotsPerEpoch)
for i := range slotsPerEpoch {
ptc, err := computePTC(ctx, st, startSlot+primitives.Slot(i))
if err != nil {
return err
}
newSlots[i] = &eth.PTCs{ValidatorIndices: ptc}
}
return st.RotatePTCWindow(newSlots)
}
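The rotation is a plain slide-and-refill over a fixed window of (2 + MIN_SEED_LOOKAHEAD) epochs. A runnable toy with ints standing in for per-slot PTCs and a 4-slot epoch:
package main
import "fmt"
func main() {
	const slotsPerEpoch = 4
	// Three-epoch window; each int stands in for one slot's PTC assignment.
	window := []int{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2}
	// Shift all epochs forward by one...
	copy(window[:len(window)-slotsPerEpoch], window[slotsPerEpoch:])
	// ...and fill the last epoch with freshly computed assignments.
	for i := len(window) - slotsPerEpoch; i < len(window); i++ {
		window[i] = 3 // stand-in for compute_ptc at the new lookahead slot
	}
	fmt.Println(window) // [1 1 1 1 2 2 2 2 3 3 3 3]
}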

View File

@@ -2,14 +2,13 @@ package gloas_test
import (
"bytes"
"slices"
"testing"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -120,6 +119,7 @@ func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
}
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
@@ -150,6 +150,7 @@ func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
}
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
@@ -210,30 +211,12 @@ func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
errIndex: 0,
}
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
require.ErrorContains(t, "failed to verify indexed form", err)
require.ErrorContains(t, "failed to convert to indexed form", err)
require.ErrorContains(t, "failed to sample beacon committee 0", err)
require.ErrorContains(t, "validator 0", err)
}
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
t.Helper()
st, err := testutil.NewBeaconStateGloas(func(seed *eth.BeaconStateGloas) error {
seed.Slot = slot
seed.Validators = vals
seed.Balances = make([]uint64, len(vals))
for i, v := range vals {
seed.Balances[i] = v.EffectiveBalance
}
seed.PtcWindow = deterministicPTCWindow(len(vals))
return nil
})
require.NoError(t, err)
return st
}
func newPhase0TestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
t.Helper()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
for _, v := range vals {
@@ -241,25 +224,10 @@ func newPhase0TestState(t *testing.T, vals []*eth.Validator, slot primitives.Slo
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
}
require.NoError(t, st.SetSlot(slot))
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
return st
}
func deterministicPTCWindow(validatorCount int) []*eth.PTCs {
window := make([]*eth.PTCs, 3*params.BeaconConfig().SlotsPerEpoch)
indices := make([]primitives.ValidatorIndex, fieldparams.PTCSize)
if validatorCount > 0 {
for i := range indices {
indices[i] = primitives.ValidatorIndex(i % validatorCount)
}
}
for i := range window {
window[i] = &eth.PTCs{
ValidatorIndices: slices.Clone(indices),
}
}
return window
}
func setupTestConfig(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -324,50 +292,6 @@ func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.Paylo
return agg.Marshal()
}
func TestProcessPTCWindow(t *testing.T) {
fuluSt, _ := testutil.DeterministicGenesisStateFulu(t, 256)
st, err := gloas.UpgradeToGloas(fuluSt)
require.NoError(t, err)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
// Get original window.
origWindow, err := st.PTCWindow()
require.NoError(t, err)
windowSize := int(slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead)))
require.Equal(t, windowSize, len(origWindow))
// Advance state to next epoch boundary so process_ptc_window sees a new epoch.
require.NoError(t, st.SetSlot(slotsPerEpoch))
// Process PTC window — should rotate.
require.NoError(t, gloas.ProcessPTCWindow(t.Context(), st))
newWindow, err := st.PTCWindow()
require.NoError(t, err)
require.Equal(t, windowSize, len(newWindow))
// The first two epochs should be the old epochs 1 and 2 (shifted left by one epoch).
for i := range 2 * slotsPerEpoch {
require.DeepEqual(t, origWindow[slotsPerEpoch+i], newWindow[i])
}
// The last epoch should be freshly computed — not all zeros.
lastStart := 2 * slotsPerEpoch
for i := range slotsPerEpoch {
ptcSlot := newWindow[lastStart+i]
require.NotNil(t, ptcSlot)
nonZero := false
for _, idx := range ptcSlot.ValidatorIndices {
if idx != 0 {
nonZero = true
break
}
}
require.Equal(t, true, nonZero, "last epoch slot %d should have non-zero validator indices", i)
}
}
type validatorLookupErrState struct {
state.BeaconState
errIndex primitives.ValidatorIndex

View File

@@ -242,74 +242,13 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestProcessExecutionPayloadWithDeferredSig_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
header := fixture.state.LatestBlockHeader()
var previousStateRoot [32]byte
copy(previousStateRoot[:], header.StateRoot)
sigBatch, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
require.NoError(t, err)
require.NotNil(t, sigBatch)
require.Equal(t, 1, len(sigBatch.Signatures))
require.Equal(t, 1, len(sigBatch.PublicKeys))
require.Equal(t, 1, len(sigBatch.Messages))
require.Equal(t, 1, len(sigBatch.Descriptions))
require.Equal(t, "execution payload envelope signature", sigBatch.Descriptions[0])
valid, err := sigBatch.Verify()
require.NoError(t, err)
require.Equal(t, true, valid)
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
updatedHeader := fixture.state.LatestBlockHeader()
require.DeepEqual(t, previousStateRoot[:], updatedHeader.StateRoot)
}
func TestProcessExecutionPayloadWithDeferredSig_PreviousStateRootMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
previousStateRoot := [32]byte{0x42}
_, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
require.ErrorContains(t, "envelope beacon block root does not match state latest block header root", err)
}
func TestApplyExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
envelope, err := fixture.signed.Envelope()
require.NoError(t, err)
require.NoError(t, ApplyExecutionPayload(t.Context(), fixture.state, envelope))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
}
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
newHash := [32]byte{}
newHash[0] = 0x99
require.NoError(t, applyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
@@ -343,95 +282,6 @@ func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestProcessBlindedExecutionPayload_NilEnvelope(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, nil))
}
func TestProcessBlindedExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
st := fixture.state
blockHash := [32]byte(fixture.payload.BlockHash)
stateRoot := [32]byte{0xAA}
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: fixture.envelope.BuilderIndex,
BlockHash: blockHash[:],
BeaconBlockRoot: make([]byte, 32),
ExecutionRequests: fixture.envelope.ExecutionRequests,
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), st, stateRoot, wrappedEnv))
latestHash, err := st.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, blockHash, latestHash)
available, err := st.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
header := st.LatestBlockHeader()
require.DeepEqual(t, stateRoot[:], header.StateRoot)
}
func TestProcessBlindedExecutionPayload_SlotMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot + 1,
BlockHash: make([]byte, 32),
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "blinded envelope slot does not match state slot", err)
}
func TestProcessBlindedExecutionPayload_BuilderIndexMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
blockHash := [32]byte(fixture.payload.BlockHash)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: 999,
BlockHash: blockHash[:],
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "builder index does not match", err)
}
func TestProcessBlindedExecutionPayload_BlockHashMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
wrongHash := bytes.Repeat([]byte{0xFF}, 32)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: fixture.envelope.BuilderIndex,
BlockHash: wrongHash,
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "block hash does not match", err)
}
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
@@ -464,14 +314,14 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(st, signed))
})
t.Run("builder", func(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
})
t.Run("invalid signature", func(t *testing.T) {
@@ -497,7 +347,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
err = VerifyExecutionPayloadEnvelopeSignature(st, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
@@ -509,7 +359,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
err = VerifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
})

View File

@@ -1,8 +1,6 @@
package gloas
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
@@ -11,13 +9,12 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// UpgradeToGloas upgrades a generic beacon state and returns the Gloas-version state.
//
// <spec fn="upgrade_to_gloas" fork="gloas" hash="8f67112c">
// <spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
// def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
// epoch = fulu.get_current_epoch(pre)
//
@@ -84,8 +81,6 @@ import (
// latest_block_hash=pre.latest_execution_payload_header.block_hash,
// # [New in Gloas:EIP7732]
// payload_expected_withdrawals=[],
// # [New in Gloas:EIP7732]
// ptc_window=initialize_ptc_window(pre),
// )
//
// # [New in Gloas:EIP7732]
@@ -148,73 +143,12 @@ func UpgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, errors.Wrap(err, "could not convert to gloas")
}
ptcWindow, err := initializePTCWindow(context.Background(), s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize ptc window")
}
if err := s.SetPTCWindow(ptcWindow); err != nil {
return nil, errors.Wrap(err, "failed to set ptc window")
}
if err := s.OnboardBuildersFromPendingDeposits(); err != nil {
return nil, errors.Wrap(err, "failed to onboard builders from pending deposits")
}
return s, nil
}
// initializePTCWindow builds the initial PTC window for the Gloas fork upgrade.
//
// <spec fn="initialize_ptc_window" fork="gloas" hash="3764b7f5">
// def initialize_ptc_window(
// state: BeaconState,
// ) -> Vector[Vector[ValidatorIndex, PTC_SIZE], (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH]:
// """
// Return the cached PTC window starting from the current epoch.
// Used to initialize the ``ptc_window`` field in the beacon state at genesis and after forks.
// """
// empty_previous_epoch = [
// Vector[ValidatorIndex, PTC_SIZE]([ValidatorIndex(0) for _ in range(PTC_SIZE)])
// for _ in range(SLOTS_PER_EPOCH)
// ]
//
// ptcs = []
// current_epoch = get_current_epoch(state)
// for e in range(1 + MIN_SEED_LOOKAHEAD):
// epoch = Epoch(current_epoch + e)
// start_slot = compute_start_slot_at_epoch(epoch)
// ptcs += [compute_ptc(state, Slot(start_slot + i)) for i in range(SLOTS_PER_EPOCH)]
//
// return empty_previous_epoch + ptcs
// </spec>
func initializePTCWindow(ctx context.Context, st state.ReadOnlyBeaconState) ([]*ethpb.PTCs, error) {
currentEpoch := slots.ToEpoch(st.Slot())
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
windowSize := slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead))
window := make([]*ethpb.PTCs, 0, windowSize)
// Previous epoch has no cached data at fork time — fill with empty slots.
for range slotsPerEpoch {
window = append(window, &ethpb.PTCs{
ValidatorIndices: make([]primitives.ValidatorIndex, fieldparams.PTCSize),
})
}
// Compute PTC for current epoch through lookahead.
startSlot, err := slots.EpochStart(currentEpoch)
if err != nil {
return nil, err
}
totalSlots := slotsPerEpoch.Mul(uint64(1 + params.BeaconConfig().MinSeedLookahead))
for i := range totalSlots {
ptc, err := computePTC(ctx, st, startSlot+i)
if err != nil {
return nil, err
}
window = append(window, &ethpb.PTCs{ValidatorIndices: ptc})
}
return window, nil
}
func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
@@ -292,6 +226,10 @@ func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, err
}
proposerLookaheadU64 := make([]uint64, len(proposerLookahead))
for i, v := range proposerLookahead {
proposerLookaheadU64[i] = uint64(v)
}
executionPayloadAvailability := make([]byte, int((params.BeaconConfig().SlotsPerHistoricalRoot+7)/8))
for i := range executionPayloadAvailability {
@@ -355,7 +293,7 @@ func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
ProposerLookahead: proposerLookahead,
ProposerLookahead: proposerLookaheadU64,
Builders: []*ethpb.Builder{},
NextWithdrawalBuilderIndex: primitives.BuilderIndex(0),
ExecutionPayloadAvailability: executionPayloadAvailability,

View File

@@ -103,7 +103,7 @@ func TestUpgradeToGloas_Basic(t *testing.T) {
}
func TestUpgradeToGloas_OnboardsBuilderDeposit(t *testing.T) {
st, _ := util.DeterministicGenesisStateFulu(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, _ := util.DeterministicGenesisStateFulu(t, 4)
sk, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -658,8 +658,8 @@ func ComputeCommittee(
}
// InitializeProposerLookahead computes the list of proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
lookAhead := make([]primitives.ValidatorIndex, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
indices, err := ActiveValidatorIndices(ctx, state, epoch+i)
if err != nil {
@@ -669,7 +669,9 @@ func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeacon
if err != nil {
return nil, errors.Wrap(err, "could not compute proposer indices")
}
lookAhead = append(lookAhead, proposerIndices...)
for _, proposerIndex := range proposerIndices {
lookAhead = append(lookAhead, uint64(proposerIndex))
}
}
return lookAhead, nil
}
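// A minimal sketch (hypothetical helper, not part of this changeset) showing the
// layout of the flattened lookahead returned above: epoch-major, slot-minor, so
// the proposer for a slot in epoch (baseEpoch + k) sits at index
// k*SLOTS_PER_EPOCH + (slot % SLOTS_PER_EPOCH).
//
//	func proposerFromLookahead(lookahead []uint64, baseEpoch primitives.Epoch, slot primitives.Slot) uint64 {
//		slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
//		k := uint64(slots.ToEpoch(slot) - baseEpoch)
//		return lookahead[k*slotsPerEpoch+uint64(slot)%slotsPerEpoch]
//	}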

View File

@@ -945,8 +945,13 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
endIdx := startIdx + slotsPerEpoch
actualProposers := proposerLookahead[startIdx:endIdx]
expectedUint64 := make([]uint64, len(expectedProposers))
for i, proposer := range expectedProposers {
expectedUint64[i] = uint64(proposer)
}
// This assertion would fail with the original bug:
for i, expected := range expectedProposers {
for i, expected := range expectedUint64 {
require.Equal(t, expected, actualProposers[i],
"Proposer index mismatch at slot %d in epoch %d", i, targetEpoch)
}

View File

@@ -1165,7 +1165,7 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]primitives.ValidatorIndex, 64)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
lookahead[34] = 42

View File

@@ -26,7 +26,6 @@ go_library(
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",

View File

@@ -33,31 +33,24 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// The sidecar index must be within the valid range.
index := sidecar.Index()
if index >= fieldparams.NumberOfColumns {
if sidecar.Index >= fieldparams.NumberOfColumns {
return ErrIndexTooLarge
}
// A sidecar for zero blobs is invalid.
kzgCommitments, err := sidecar.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
if len(kzgCommitments) == 0 {
if len(sidecar.KzgCommitments) == 0 {
return ErrNoKzgCommitments
}
// A sidecar with more commitments than the max blob count for this block is invalid.
slot := sidecar.Slot()
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(kzgCommitments) > maxBlobsPerBlock {
if len(sidecar.KzgCommitments) > maxBlobsPerBlock {
return ErrTooManyCommitments
}
// The column length must be equal to the number of commitments/proofs.
column := sidecar.Column()
kzgProofs := sidecar.KzgProofs()
if len(column) != len(kzgCommitments) || len(column) != len(kzgProofs) {
if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) {
return ErrMismatchLength
}
@@ -74,11 +67,7 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
c, err := sidecars[i].KzgCommitments()
if err != nil {
return errors.Wrapf(err, "sidecar %d kzg commitments", i)
}
commitmentsBySidecar[i] = c
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
}
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
}
@@ -98,11 +87,10 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
// Compute the total count.
count := 0
for i, sidecar := range sidecars {
column := sidecar.Column()
if len(column) != len(commitmentsBySidecar[i]) {
if len(sidecar.Column) != len(commitmentsBySidecar[i]) {
return ErrMismatchLength
}
count += len(column)
count += len(sidecar.Column)
}
commitments := make([]kzg.Bytes48, 0, count)
@@ -111,10 +99,7 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
proofs := make([]kzg.Bytes48, 0, count)
for sidecarIndex, sidecar := range sidecars {
column := sidecar.Column()
kzgProofs := sidecar.KzgProofs()
index := sidecar.Index()
for i := range column {
for i := range sidecar.Column {
var (
commitment kzg.Bytes48
cell kzg.Cell
@@ -122,8 +107,8 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
)
commitmentBytes := commitmentsBySidecar[sidecarIndex][i]
cellBytes := column[i]
proofBytes := kzgProofs[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
@@ -136,7 +121,7 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
copy(proof[:], proofBytes)
commitments = append(commitments, commitment)
indices = append(indices, index)
indices = append(indices, sidecar.Index)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
@@ -158,27 +143,16 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.IsGloas() {
return nil
}
signedBlockHeader, err := sidecar.SignedBlockHeader()
if err != nil {
return errors.Wrap(err, "signed block header")
}
if signedBlockHeader == nil || signedBlockHeader.Header == nil {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
root := signedBlockHeader.Header.BodyRoot
root := sidecar.SignedBlockHeader.Header.BodyRoot
if len(root) != fieldparams.RootLength {
return ErrBadRootLength
}
kzgCommitments, err := sidecar.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
leaves := blocks.LeavesFromCommitments(kzgCommitments)
leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
if err != nil {
@@ -190,11 +164,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return errors.Wrap(err, "hash tree root")
}
kzgInclusionProof, err := sidecar.KzgCommitmentsInclusionProof()
if err != nil {
return errors.Wrap(err, "kzg commitments inclusion proof")
}
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, kzgInclusionProof)
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
if !verified {
return ErrInvalidInclusionProof
}

View File

@@ -70,8 +70,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("size mismatch", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
column := sidecars[0].Column()
column[0] = column[0][:len(column[0])-1] // Remove one byte to create size mismatch
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
@@ -79,7 +78,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("invalid proof", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column()[0][0]++ // It is OK to overflow
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
@@ -93,7 +92,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("with commitments", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, sidecarCommitments(t, sidecars))
err := peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, sidecarCommitments(sidecars))
require.NoError(t, err)
})
}
@@ -203,7 +202,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
roDataColumn := blocks.NewRODataColumnNoVerify(tc.dataColumnSidecar)
roDataColumn := blocks.RODataColumn{DataColumnSidecar: tc.dataColumnSidecar}
err = peerdas.VerifyDataColumnSidecarInclusionProof(roDataColumn)
if tc.expectedError == nil {
require.NoError(t, err)
@@ -215,13 +214,6 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
}
}
func TestVerifyDataColumnSidecarInclusionProof_SkipsGloas(t *testing.T) {
dc := &ethpb.DataColumnSidecarGloas{Index: 0, Column: [][]byte{{0x01}}, KzgProofs: [][]byte{make([]byte, 48)}}
roCol, err := blocks.NewRODataColumnGloas(dc)
require.NoError(t, err)
require.NoError(t, peerdas.VerifyDataColumnSidecarInclusionProof(roCol))
}
func TestComputeSubnetForDataColumnSidecar(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -362,12 +354,10 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
}
}
func sidecarCommitments(t *testing.T, sidecars []blocks.RODataColumn) [][][]byte {
func sidecarCommitments(sidecars []blocks.RODataColumn) [][][]byte {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
var err error
commitmentsBySidecar[i], err = sidecars[i].KzgCommitments()
require.NoError(t, err)
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
}
return commitmentsBySidecar
}

View File

@@ -79,9 +79,9 @@ func recoverCellsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blob
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column()[blobIndex]
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index())
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, err := kzg.RecoverCells(cellsIndices, cells)
@@ -116,9 +116,9 @@ func recoverCellsAndProofsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataCol
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column()[blobIndex]
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index())
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, recoveredProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
@@ -151,10 +151,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
referenceSidecar := verifiedRoSidecars[0]
// Check if all columns have the same length and are committed to the same block.
blobCount := len(referenceSidecar.Column())
blobCount := len(referenceSidecar.Column)
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range verifiedRoSidecars[1:] {
if len(sidecar.Column()) != blobCount {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -171,7 +171,7 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
// Sort the input sidecars by index.
sort.Slice(verifiedRoSidecars, func(i, j int) bool {
return verifiedRoSidecars[i].Index() < verifiedRoSidecars[j].Index()
return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
})
// Recover cells and compute proofs in parallel.
@@ -209,9 +209,9 @@ func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColum
}
// Check if the sidecars are sorted by index and do not contain duplicates.
previousColumnIndex := verifiedDataColumnSidecars[0].Index()
previousColumnIndex := verifiedDataColumnSidecars[0].Index
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
columnIndex := dataColumnSidecar.Index()
columnIndex := dataColumnSidecar.Index
if columnIndex <= previousColumnIndex {
return nil, ErrDataColumnSidecarsNotSortedByIndex
}
@@ -226,7 +226,7 @@ func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColum
}
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
if verifiedDataColumnSidecars[cellsPerBlob-1].Index() == uint64(cellsPerBlob-1) {
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
return verifiedDataColumnSidecars, nil
}
@@ -415,9 +415,9 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
}
// Check if the sidecars are sorted by index and do not contain duplicates.
previousColumnIndex := verifiedDataColumnSidecars[0].Index()
previousColumnIndex := verifiedDataColumnSidecars[0].Index
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
columnIndex := dataColumnSidecar.Index()
columnIndex := dataColumnSidecar.Index
if columnIndex <= previousColumnIndex {
return nil, ErrDataColumnSidecarsNotSortedByIndex
}
@@ -433,7 +433,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
// Verify that the actual blob count from the first sidecar matches the expected count
referenceSidecar := verifiedDataColumnSidecars[0]
actualBlobCount := len(referenceSidecar.Column())
actualBlobCount := len(referenceSidecar.Column)
if actualBlobCount != blobCount {
return nil, errors.Errorf("blob count mismatch: expected %d, got %d", blobCount, actualBlobCount)
}
@@ -448,7 +448,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
// Check if all columns have the same length and are committed to the same block.
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range verifiedDataColumnSidecars[1:] {
if len(sidecar.Column()) != blobCount {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -458,7 +458,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
}
// Check if we have all non-extended columns (0..63) - if so, no reconstruction needed.
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index() == uint64(cellsPerBlob-1)
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1)
var reconstructedCells map[int][]kzg.Cell
if !hasAllNonExtendedColumns {
@@ -480,7 +480,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
var cell []byte
if hasAllNonExtendedColumns {
// Use existing cells from sidecars
cell = verifiedDataColumnSidecars[columnIndex].Column()[blobIndex]
cell = verifiedDataColumnSidecars[columnIndex].Column[blobIndex]
} else {
// Use reconstructed cells
cell = reconstructedCells[blobIndex][columnIndex][:]
@@ -501,14 +501,8 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
referenceSidecar := dataColumnSidecars[0]
kzgCommitments, err := referenceSidecar.KzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "kzg commitments")
}
signedBlockHeader, err := referenceSidecar.SignedBlockHeader()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
kzgCommitments := referenceSidecar.KzgCommitments
signedBlockHeader := referenceSidecar.SignedBlockHeader
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
for _, blobIndex := range indices {
@@ -517,7 +511,7 @@ func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSideca
// Compute the content of the blob.
for columnIndex := range fieldparams.CellsPerBlob {
dataColumnSidecar := dataColumnSidecars[columnIndex]
cell := dataColumnSidecar.Column()[blobIndex]
cell := dataColumnSidecar.Column[blobIndex]
if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell {
return nil, errors.New("wrong cell size - should never happen")
}

View File

@@ -36,7 +36,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
// Arbitrarily alter the column with index 3
verifiedRoSidecars[3].DataColumnSidecar().Column = verifiedRoSidecars[3].DataColumnSidecar().Column[1:]
verifiedRoSidecars[3].Column = verifiedRoSidecars[3].Column[1:]
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars)
require.ErrorIs(t, err, peerdas.ErrColumnLengthsDiffer)
@@ -88,10 +88,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
require.NoError(t, err)
// Verify that the reconstructed sidecars are equal to the original ones.
require.Equal(t, len(inputVerifiedRoSidecars), len(reconstructedVerifiedRoSidecars))
for i := range inputVerifiedRoSidecars {
require.DeepSSZEqual(t, inputVerifiedRoSidecars[i].DataColumnSidecar(), reconstructedVerifiedRoSidecars[i].DataColumnSidecar())
}
require.DeepSSZEqual(t, inputVerifiedRoSidecars, reconstructedVerifiedRoSidecars)
})
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -24,13 +23,11 @@ var (
var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
_ ConstructionPopulator = (*BidReconstructionSource)(nil)
)
const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
BidType = "ExecutionPayloadBid"
)
type (
@@ -40,7 +37,7 @@ type (
ConstructionPopulator interface {
Slot() primitives.Slot
Root() [fieldparams.RootLength]byte
ProposerIndex() (primitives.ValidatorIndex, error)
ProposerIndex() primitives.ValidatorIndex
Commitments() ([][]byte, error)
Type() string
@@ -52,17 +49,11 @@ type (
blocks.ROBlock
}
// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
SidecarReconstructionSource struct {
blocks.VerifiedRODataColumn
}
// BidReconstructionSource is a ConstructionPopulator that uses the execution payload bid
// from a Gloas beacon block to extract KZG commitments for data column sidecar construction.
BidReconstructionSource struct {
blocks.ROBlock
}
blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
@@ -80,14 +71,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
// PopulateFromBid creates a BidReconstructionSource from a Gloas beacon block.
// In Gloas (ePBS), the execution payload is delivered separately via the payload envelope,
// but the KZG commitments are available in the bid embedded in the block, allowing
// data column sidecars to be constructed from the EL as soon as the block arrives.
func PopulateFromBid(block blocks.ROBlock) *BidReconstructionSource {
return &BidReconstructionSource{ROBlock: block}
}
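// Usage sketch for the helper above, assuming the cells and proofs were already
// fetched from the execution layer (e.g. via engine_getBlobsV2); the variable
// names are illustrative only:
//
//	src := peerdas.PopulateFromBid(roBlock)
//	sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, src)
//	if err != nil {
//		return err
//	}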
// ValidatorsCustodyRequirement returns the number of custody groups required for the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(st beaconState.ReadOnlyBalances, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -128,93 +111,33 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
isGloas := slots.ToEpoch(src.Slot()) >= params.BeaconConfig().GloasForkEpoch
root := src.Root()
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
if isGloas {
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecarGloas{
Index: idx,
Column: cells[idx],
KzgProofs: proofs[idx],
Slot: src.Slot(),
BeaconBlockRoot: root[:],
}
if len(sidecar.Column) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnGloasWithRoot(sidecar, root)
if err != nil {
return nil, errors.Wrap(err, "new ro data column gloas")
}
roSidecars = append(roSidecars, roSidecar)
}
} else {
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgCommitments: info.kzgCommitments,
KzgProofs: proofs[idx],
SignedBlockHeader: info.signedBlockHeader,
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
}
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}
// DataColumnSidecarsGloas constructs Gloas-format data column sidecars directly from cells, proofs,
// slot, and block root. Used by the proposer when building sidecars outside the ConstructionPopulator flow.
func DataColumnSidecarsGloas(
cellsPerBlob [][]kzg.Cell,
proofsPerBlob [][]kzg.Proof,
slot primitives.Slot,
beaconBlockRoot [32]byte,
) ([]blocks.RODataColumn, error) {
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
if len(cellsPerBlob) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
return nil, errors.Wrap(err, "extract block info")
}
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecarGloas{
Index: idx,
Column: cells[idx],
KzgProofs: proofs[idx],
Slot: slot,
BeaconBlockRoot: beaconBlockRoot[:],
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgCommitments: info.kzgCommitments,
KzgProofs: proofs[idx],
SignedBlockHeader: info.signedBlockHeader,
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
}
if len(sidecar.Column) != len(sidecar.KzgProofs) {
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnGloasWithRoot(sidecar, beaconBlockRoot)
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
if err != nil {
return nil, errors.Wrap(err, "new ro data column gloas")
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}
@@ -225,8 +148,8 @@ func (s *BlockReconstructionSource) Slot() primitives.Slot {
}
// ProposerIndex returns the proposer index of the source
func (s *BlockReconstructionSource) ProposerIndex() (primitives.ValidatorIndex, error) {
return s.Block().ProposerIndex(), nil
func (s *BlockReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return s.Block().ProposerIndex()
}
// Commitments returns the blob KZG commitments of the source
@@ -245,24 +168,32 @@ func (s *BlockReconstructionSource) Type() string {
return BlockType
}
// extract extracts the block information from the source
func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
block := b.Block()
header, err := b.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}
commitments, err := b.Block().Body().BlobKzgCommitments()
commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "commitments")
}
inclusionProof, err := blocks.MerkleProofKZGCommitments(b.Block().Body())
inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
if err != nil {
return nil, errors.Wrap(err, "merkle proof kzg commitments")
}
return &blockInfo{
info := &blockInfo{
signedBlockHeader: header,
kzgCommitments: commitments,
kzgInclusionProof: inclusionProof,
}, nil
}
return info, nil
}
// rotateRowsToCols takes a 2D slice of cells and proofs, where x indexes rows (blobs) and y indexes columns,
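// A rough illustration of the rotation (sketch only, not part of this changeset):
// a cell addressed as cellsPerBlob[blob][col] on input ends up at cells[col][blob],
// i.e. roughly:
//
//	for col := range numberOfColumns {
//		for blob := range cellsPerBlob {
//			cells[col][blob] = cellsPerBlob[blob][col]
//		}
//	}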
@@ -304,7 +235,7 @@ func (s *SidecarReconstructionSource) Root() [fieldparams.RootLength]byte {
// Commitments returns the blob KZG commitments of the source
func (s *SidecarReconstructionSource) Commitments() ([][]byte, error) {
return s.KzgCommitments()
return s.KzgCommitments, nil
}
// Type returns the type of the source
@@ -312,61 +243,13 @@ func (s *SidecarReconstructionSource) Type() string {
return SidecarType
}
// extract extracts the block information from the source
func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
sbh, err := s.SignedBlockHeader()
if err != nil {
return nil, err
info := &blockInfo{
signedBlockHeader: s.SignedBlockHeader,
kzgCommitments: s.KzgCommitments,
kzgInclusionProof: s.KzgCommitmentsInclusionProof,
}
comms, err := s.KzgCommitments()
if err != nil {
return nil, err
}
incProof, err := s.KzgCommitmentsInclusionProof()
if err != nil {
return nil, err
}
return &blockInfo{
signedBlockHeader: sbh,
kzgCommitments: comms,
kzgInclusionProof: incProof,
}, nil
}
// Slot returns the slot of the source
func (s *BidReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
}
// ProposerIndex returns the proposer index of the source
func (s *BidReconstructionSource) ProposerIndex() (primitives.ValidatorIndex, error) {
return s.Block().ProposerIndex(), nil
}
// Commitments returns the blob KZG commitments from the execution payload bid
func (s *BidReconstructionSource) Commitments() ([][]byte, error) {
bid, err := s.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return nil, errors.Wrap(err, "signed execution payload bid")
}
return bid.Message.BlobKzgCommitments, nil
}
// Type returns the type of the source
func (s *BidReconstructionSource) Type() string {
return BidType
}
func (s *BidReconstructionSource) extract() (*blockInfo, error) {
commitments, err := s.Commitments()
if err != nil {
return nil, err
}
header, err := s.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}
return &blockInfo{
signedBlockHeader: header,
kzgCommitments: commitments,
}, nil
return info, nil
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -177,24 +176,22 @@ func TestDataColumnSidecars(t *testing.T) {
// Verify each sidecar has the expected structure
for i, sidecar := range sidecars {
require.Equal(t, uint64(i), sidecar.Index())
require.Equal(t, 2, len(sidecar.Column()))
comms, err := sidecar.KzgCommitments()
require.NoError(t, err)
require.Equal(t, 2, len(comms))
require.Equal(t, 2, len(sidecar.KzgProofs()))
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 2, len(sidecar.Column))
require.Equal(t, 2, len(sidecar.KzgCommitments))
require.Equal(t, 2, len(sidecar.KzgProofs))
// Verify commitments match what we set
require.DeepEqual(t, commitment1, comms[0])
require.DeepEqual(t, commitment2, comms[1])
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
// Verify column data comes from the correct cells
require.Equal(t, byte(i), sidecar.Column()[0][0])
require.Equal(t, byte(i+128), sidecar.Column()[1][0])
require.Equal(t, byte(i), sidecar.Column[0][0])
require.Equal(t, byte(i+128), sidecar.Column[1][0])
// Verify proofs come from the correct proofs
require.Equal(t, byte(i), sidecar.KzgProofs()[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs()[1][0])
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
}
})
}
@@ -244,9 +241,7 @@ func TestReconstructionSource(t *testing.T) {
src := peerdas.PopulateFromBlock(rob)
require.Equal(t, rob.Block().Slot(), src.Slot())
require.Equal(t, rob.Root(), src.Root())
srcPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, rob.Block().ProposerIndex(), srcPI)
require.Equal(t, rob.Block().ProposerIndex(), src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
@@ -262,11 +257,7 @@ func TestReconstructionSource(t *testing.T) {
src := peerdas.PopulateFromSidecar(referenceSidecar)
require.Equal(t, referenceSidecar.Slot(), src.Slot())
require.Equal(t, referenceSidecar.BlockRoot(), src.Root())
refPI, err := referenceSidecar.ProposerIndex()
require.NoError(t, err)
srcPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, refPI, srcPI)
require.Equal(t, referenceSidecar.ProposerIndex(), src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
@@ -276,87 +267,4 @@ func TestReconstructionSource(t *testing.T) {
require.Equal(t, peerdas.SidecarType, src.Type())
})
t.Run("from bid", func(t *testing.T) {
bidCommitment1 := make([]byte, 48)
bidCommitment2 := make([]byte, 48)
bidCommitment1[0] = 0xAA
bidCommitment2[0] = 0xBB
gloasBlockPb := util.NewBeaconBlockGloas()
gloasBlockPb.Block.Body.SignedExecutionPayloadBid.Message.BlobKzgCommitments = [][]byte{bidCommitment1, bidCommitment2}
gloasBlockPb.Block.Slot = 42
gloasBlockPb.Block.ProposerIndex = 7
signedGloasBlock, err := blocks.NewSignedBeaconBlock(gloasBlockPb)
require.NoError(t, err)
gloasRob, err := blocks.NewROBlock(signedGloasBlock)
require.NoError(t, err)
src := peerdas.PopulateFromBid(gloasRob)
require.Equal(t, primitives.Slot(42), src.Slot())
require.Equal(t, gloasRob.Root(), src.Root())
bidPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(7), bidPI)
commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, bidCommitment1, commitments[0])
require.DeepEqual(t, bidCommitment2, commitments[1])
require.Equal(t, peerdas.BidType, src.Type())
})
}
func TestPopulateFromBid_DataColumnSidecars(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)
bidCommitment1 := make([]byte, 48)
bidCommitment2 := make([]byte, 48)
bidCommitment1[0] = 0xAA
bidCommitment2[0] = 0xBB
gloasBlockPb := util.NewBeaconBlockGloas()
gloasBlockPb.Block.Body.SignedExecutionPayloadBid.Message.BlobKzgCommitments = [][]byte{bidCommitment1, bidCommitment2}
signedGloasBlock, err := blocks.NewSignedBeaconBlock(gloasBlockPb)
require.NoError(t, err)
gloasRob, err := blocks.NewROBlock(signedGloasBlock)
require.NoError(t, err)
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, numberOfColumns),
make([]kzg.Proof, numberOfColumns),
}
for i := range numberOfColumns {
cellsPerBlob[0][i][0] = byte(i)
proofsPerBlob[0][i][0] = byte(i)
cellsPerBlob[1][i][0] = byte(i + 128)
proofsPerBlob[1][i][0] = byte(i + 128)
}
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBid(gloasRob))
require.NoError(t, err)
require.Equal(t, int(numberOfColumns), len(sidecars))
for i, sidecar := range sidecars {
require.Equal(t, true, sidecar.IsGloas())
require.Equal(t, uint64(i), sidecar.Index())
require.Equal(t, 2, len(sidecar.Column()))
require.Equal(t, 2, len(sidecar.KzgProofs()))
}
}

View File

@@ -46,21 +46,16 @@ func DataColumnsAlignWithBlock(block blocks.ROBlock, dataColumns []blocks.ROData
return ErrRootMismatch
}
dcKzgCommitments, err := dataColumn.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
// Check if the content length of the data column sidecar matches the block.
if len(dataColumn.Column()) != blockCommitmentCount ||
len(dcKzgCommitments) != blockCommitmentCount ||
len(dataColumn.KzgProofs()) != blockCommitmentCount {
if len(dataColumn.Column) != blockCommitmentCount ||
len(dataColumn.KzgCommitments) != blockCommitmentCount ||
len(dataColumn.KzgProofs) != blockCommitmentCount {
return ErrBlockColumnSizeMismatch
}
// Check if the commitments of the data column sidecar match the block.
for i := range blockCommitments {
if !bytes.Equal(blockCommitments[i], dcKzgCommitments[i]) {
if !bytes.Equal(blockCommitments[i], dataColumn.KzgCommitments[i]) {
return ErrCommitmentMismatch
}
}

View File

@@ -41,21 +41,21 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
t.Run("column size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().Column = [][]byte{}
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG commitments size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().KzgCommitments = [][]byte{}
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG proofs mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().KzgProofs = [][]byte{}
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
@@ -63,7 +63,7 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
t.Run("commitment mismatch", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].DataColumnSidecar().KzgCommitments[0][0]++ // Overflow is OK
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})

View File

@@ -130,7 +130,7 @@ func gloasOperations(ctx context.Context, st state.BeaconState, block interfaces
//
// Spec definition:
//
// <spec fn="process_epoch" fork="gloas" hash="bf3575a9">
// <spec fn="process_epoch" fork="gloas" hash="393b69ef">
// def process_epoch(state: BeaconState) -> None:
// process_justification_and_finalization(state)
// process_inactivity_updates(state)
@@ -149,8 +149,6 @@ func gloasOperations(ctx context.Context, st state.BeaconState, block interfaces
// process_participation_flag_updates(state)
// process_sync_committee_updates(state)
// process_proposer_lookahead(state)
// # [New in Gloas:EIP7732]
// process_ptc_window(state)
// </spec>
func processEpochGloas(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "gloas.ProcessEpoch")
@@ -224,5 +222,5 @@ func processEpochGloas(ctx context.Context, state state.BeaconState) error {
if err := fulu.ProcessProposerLookahead(ctx, state); err != nil {
return err
}
return gloas.ProcessPTCWindow(ctx, state)
return nil
}

View File

@@ -183,7 +183,7 @@ func newGloasForkBoundaryState(
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PayloadExpectedWithdrawals: make([]*engine.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 0),
ProposerLookahead: make([]uint64, 0),
Builders: make([]*ethpb.Builder, 0),
}
for i := range protoState.BlockRoots {

View File

@@ -116,32 +116,17 @@ func CalculateStateRoot(
rollback state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
st, err := CalculatePostState(ctx, rollback, signed)
if err != nil {
return [32]byte{}, err
}
return st.HashTreeRoot(ctx)
}
// CalculatePostState returns the post-block state after processing the given
// block on a copy of the input state. It is identical to CalculateStateRoot
// but returns the full state instead of just its hash tree root.
func CalculatePostState(
ctx context.Context,
rollback state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.CalculatePostState")
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
defer span.End()
if ctx.Err() != nil {
tracing.AnnotateError(span, ctx.Err())
return nil, ctx.Err()
return [32]byte{}, ctx.Err()
}
if rollback == nil || rollback.IsNil() {
return nil, errors.New("nil state")
return [32]byte{}, errors.New("nil state")
}
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
return nil, errors.New("nil block")
return [32]byte{}, errors.New("nil block")
}
// Copy state to avoid mutating the state reference.
@@ -151,22 +136,22 @@ func CalculatePostState(
var err error
state, err = ProcessSlotsForBlock(ctx, state, signed.Block())
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
return [32]byte{}, errors.Wrap(err, "could not process slots")
}
// Execute per block transition.
if features.Get().EnableProposerPreprocessing {
state, err = processBlockForProposing(ctx, state, signed)
if err != nil {
return nil, errors.Wrap(err, "could not process block for proposing")
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
}
} else {
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return nil, errors.Wrap(err, "could not process block")
return [32]byte{}, errors.Wrap(err, "could not process block")
}
}
return state, nil
return state.HashTreeRoot(ctx)
}
// processBlockVerifySigs processes the block and verifies the signatures within it. The outer block signature itself is not verified, as the block is not yet signed.

View File

@@ -204,7 +204,7 @@ func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataC
sum = s.store.Summary(sc.BlockRoot())
lastRoot = sc.BlockRoot()
}
if sum.HasIndex(sc.Index()) {
if sum.HasIndex(sc.Index) {
stored[i] = struct{}{}
}
}

View File

@@ -875,7 +875,7 @@ func TestColumnsNotStored(t *testing.T) {
if len(tc.stored) > 0 {
resultIndices := make(map[uint64]bool)
for _, col := range result {
resultIndices[col.Index()] = true
resultIndices[col.Index] = true
}
for _, storedIdx := range tc.stored {
require.Equal(t, false, resultIndices[storedIdx],
@@ -887,8 +887,8 @@ func TestColumnsNotStored(t *testing.T) {
if len(tc.expected) > 0 && len(tc.stored) == 0 {
// Only check exact order for non-stored cases (where we know they stay in same order)
for i, expectedIdx := range tc.expected {
require.Equal(t, columns[expectedIdx].Index(), result[i].Index(),
fmt.Sprintf("column %d: expected index %d, got %d", i, columns[expectedIdx].Index(), result[i].Index()))
require.Equal(t, columns[expectedIdx].Index, result[i].Index,
fmt.Sprintf("column %d: expected index %d, got %d", i, columns[expectedIdx].Index, result[i].Index))
}
}

View File

@@ -66,11 +66,10 @@ type dataColumnCacheEntry struct {
// stash will return an error if the given data column Index is out of bounds.
// It will overwrite any existing entry for the same index.
func (e *dataColumnCacheEntry) stash(sc blocks.RODataColumn) error {
index := sc.Index()
if index >= fieldparams.NumberOfColumns {
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", index)
if sc.Index >= fieldparams.NumberOfColumns {
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
}
e.scs[index] = sc
e.scs[sc.Index] = sc
return nil
}

View File

@@ -23,7 +23,7 @@ func TestEnsureDeleteSetDiskSummary(t *testing.T) {
expect, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
require.NoError(t, entry.stash(expect[0]))
require.Equal(t, 1, len(entry.scs))
cols, err := nonDupe.append([]blocks.RODataColumn{}, peerdas.NewColumnIndicesFromSlice([]uint64{expect[0].Index()}))
cols, err := nonDupe.append([]blocks.RODataColumn{}, peerdas.NewColumnIndicesFromSlice([]uint64{expect[0].Index}))
require.NoError(t, err)
require.DeepEqual(t, expect[0], cols[0])
@@ -109,10 +109,10 @@ func TestAppendDataColumns(t *testing.T) {
require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
slices.SortFunc(actual, func(i, j blocks.RODataColumn) int {
return int(i.Index()) - int(j.Index())
return int(i.Index) - int(j.Index)
})
for i := range expected {
require.Equal(t, expected[i].Index(), actual[i].Index())
require.Equal(t, expected[i].Index, actual[i].Index)
}
require.Equal(t, 1, len(original))
})

View File

@@ -369,7 +369,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
// Group data column sidecars by root.
for _, dataColumnSidecar := range dataColumnSidecars {
// Check if the data column index is too large.
if dataColumnSidecar.Index() >= mandatoryNumberOfColumns {
if dataColumnSidecar.Index >= mandatoryNumberOfColumns {
return errDataColumnIndexTooLarge
}
@@ -396,7 +396,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
// Get all indices.
indices := make([]uint64, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
indices = append(indices, dataColumnSidecar.Index())
indices = append(indices, dataColumnSidecar.Index)
}
// Compute the data columns ident.
@@ -546,7 +546,7 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
return nil, errors.Wrap(err, "seek")
}
verifiedRODataColumn, err := verification.VerifiedRODataColumnFromDisk(file, root, metadata.sszEncodedDataColumnSidecarSize, summary.epoch)
verifiedRODataColumn, err := verification.VerifiedRODataColumnFromDisk(file, root, metadata.sszEncodedDataColumnSidecarSize)
if err != nil {
return nil, errors.Wrap(err, "verified RO data column from disk")
}
@@ -733,7 +733,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
for _, dataColumnSidecar := range dataColumnSidecars {
// Extract the data columns index.
dataColumnIndex := dataColumnSidecar.Index()
dataColumnIndex := dataColumnSidecar.Index
ok, _, err := metadata.indices.get(dataColumnIndex)
if err != nil {
@@ -830,7 +830,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
for _, dataColumnSidecar := range dataColumnSidecars {
// Extract the data column index.
dataColumnIndex := dataColumnSidecar.Index()
dataColumnIndex := dataColumnSidecar.Index
// Skip if the data column is already stored.
ok, _, err := indices.get(dataColumnIndex)

View File

@@ -112,7 +112,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedRoDataColumnSidecars[0])
altered, err := blocks.NewRODataColumnWithRoot(
verifiedRoDataColumnSidecars[1].RODataColumn.DataColumnSidecar(),
verifiedRoDataColumnSidecars[1].RODataColumn.DataColumnSidecar,
verifiedRoDataColumnSidecars[0].BlockRoot(),
)
require.NoError(t, err)
@@ -263,7 +263,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
)
// Build expected bytes.
firstSszEncodedDataColumnSidecar, err := expectedDataColumnSidecars[0].RODataColumn.DataColumnSidecar().MarshalSSZ()
firstSszEncodedDataColumnSidecar, err := expectedDataColumnSidecars[0].MarshalSSZ()
require.NoError(t, err)
dataColumnSidecarsCount := len(expectedDataColumnSidecars)
@@ -272,7 +272,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
sszEncodedDataColumnSidecars := make([]byte, 0, dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, firstSszEncodedDataColumnSidecar...)
for _, dataColumnSidecar := range expectedDataColumnSidecars[1:] {
sszEncodedDataColumnSidecar, err := dataColumnSidecar.RODataColumn.DataColumnSidecar().MarshalSSZ()
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
require.NoError(t, err)
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
@@ -362,17 +362,11 @@ func TestGetDataColumnSidecars(t *testing.T) {
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(root, nil)
require.NoError(t, err)
require.Equal(t, len(expectedVerifiedRoDataColumnSidecars), len(verifiedRODataColumnSidecars))
for i := range expectedVerifiedRoDataColumnSidecars {
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars[i].DataColumnSidecar(), verifiedRODataColumnSidecars[i].DataColumnSidecar())
}
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
verifiedRODataColumnSidecars, err = dataColumnStorage.Get(root, []uint64{12, 13, 14})
require.NoError(t, err)
require.Equal(t, len(expectedVerifiedRoDataColumnSidecars), len(verifiedRODataColumnSidecars))
for i := range expectedVerifiedRoDataColumnSidecars {
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars[i].DataColumnSidecar(), verifiedRODataColumnSidecars[i].DataColumnSidecar())
}
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
})
}

View File

@@ -60,10 +60,6 @@ var (
GetPayloadMethodV5,
GetBlobsV2,
}
gloasEngineEndpoints = []string{
NewPayloadMethodV5,
}
)
// ClientVersionV1 represents the response from engine_getClientVersionV1.
@@ -84,8 +80,6 @@ const (
NewPayloadMethodV3 = "engine_newPayloadV3"
// NewPayloadMethodV4 is the engine_newPayloadVX method added at Electra.
NewPayloadMethodV4 = "engine_newPayloadV4"
// NewPayloadMethodV5 is the engine_newPayloadVX method added at Gloas.
NewPayloadMethodV5 = "engine_newPayloadV5"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -154,7 +148,7 @@ type Reconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests, slot primitives.Slot) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
@@ -167,7 +161,7 @@ type EngineCaller interface {
var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests, slot primitives.Slot) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
defer func(start time.Time) {
@@ -201,11 +195,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, errors.Wrap(err, "failed to encode execution requests")
}
method := NewPayloadMethodV4
if slots.ToEpoch(slot) >= params.BeaconConfig().GloasForkEpoch {
method = NewPayloadMethodV5
}
err = s.rpcClient.CallContext(ctx, result, method, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests)
err = s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests)
if err != nil {
return nil, handleRPCError(err)
}
@@ -271,7 +261,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Deneb, version.Electra, version.Fulu, version.Gloas:
case version.Deneb, version.Electra, version.Fulu:
a, err := attrs.PbV3()
if err != nil {
return nil, nil, err
@@ -305,7 +295,7 @@ func (s *Service) ForkchoiceUpdated(
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
epoch := slots.ToEpoch(slot)
if epoch >= params.BeaconConfig().GloasForkEpoch || epoch >= params.BeaconConfig().FuluForkEpoch {
if epoch >= params.BeaconConfig().FuluForkEpoch {
return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
}
if epoch >= params.BeaconConfig().ElectraForkEpoch {
@@ -357,10 +347,6 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
}
if params.GloasEnabled() {
supportedEngineEndpoints = append(supportedEngineEndpoints, gloasEngineEndpoints...)
}
elSupportedEndpointsSlice := make([]string, len(supportedEngineEndpoints))
if err := s.rpcClient.CallContext(ctx, &elSupportedEndpointsSlice, ExchangeCapabilities, supportedEngineEndpoints); err != nil {
return nil, handleRPCError(err)

View File

@@ -129,7 +129,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -140,7 +140,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -605,7 +605,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -619,7 +619,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -633,7 +633,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -672,7 +672,7 @@ func TestClient_HTTP(t *testing.T) {
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -686,7 +686,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -700,7 +700,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -714,7 +714,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -753,7 +753,7 @@ func TestClient_HTTP(t *testing.T) {
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -767,7 +767,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -781,7 +781,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -795,7 +795,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -833,7 +833,7 @@ func TestClient_HTTP(t *testing.T) {
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -847,7 +847,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -861,7 +861,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -875,7 +875,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -914,7 +914,7 @@ func TestClient_HTTP(t *testing.T) {
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -928,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, 0)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, []uint8(nil), resp)
})

View File

@@ -49,7 +49,7 @@ type EngineClient struct {
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash, _ *pb.ExecutionRequests, _ primitives.Slot) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash, _ *pb.ExecutionRequests) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}

View File

@@ -325,20 +325,15 @@ func (f *ForkChoice) updateBalances() error {
if pn != nil && vote.currentRoot != zHash {
if pending {
if pn.node.balance < oldBalance {
if pn.node.slot == 0 {
log.WithField("nodeRoot", fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:]))).
Debug("Genesis node pending balance underflow, clamping to zero")
} else {
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": pn.node.balance,
"nodeWeight": pn.node.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
}
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": pn.node.balance,
"nodeWeight": pn.node.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
pn.node.balance = 0
} else {
pn.node.balance -= oldBalance
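The clamp above exists because node balances are unsigned: subtracting a stale oldBalance larger than the node's current balance would wrap around to a huge value. A standalone sketch of the same saturating subtraction:

// saturatingSub mirrors the underflow guard above: instead of letting an
// unsigned subtraction wrap around, clamp the result to zero.
func saturatingSub(balance, delta uint64) uint64 {
	if balance < delta {
		return 0
	}
	return balance - delta
}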
@@ -383,12 +378,9 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
fn := f.store.fullNodeByRoot[root]
if fn == nil {
fn = f.store.emptyNodeByRoot[root]
if fn == nil {
return errors.Wrapf(ErrNilNode, "could not set node to valid, no node found for root: %#x", bytesutil.Trunc(root[:]))
}
fn, ok := f.store.fullNodeByRoot[root]
if !ok || fn == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
return f.store.setNodeAndParentValidated(ctx, fn)
}
@@ -542,19 +534,6 @@ func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.B
bcp.JustifiedCheckpoint.Epoch, bcp.FinalizedCheckpoint.Epoch); err != nil {
return err
}
if bcp.HasPayload {
root := bcp.Block.Root()
en := f.store.emptyNodeByRoot[root]
if en != nil && f.store.fullNodeByRoot[root] == nil {
f.store.fullNodeByRoot[root] = &PayloadNode{
node: en.node,
optimistic: true,
timestamp: time.Now(),
full: true,
children: make([]*Node, 0),
}
}
}
if err := f.updateCheckpoints(ctx, bcp.JustifiedCheckpoint, bcp.FinalizedCheckpoint); err != nil {
return err
}

View File

@@ -372,28 +372,6 @@ func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.
return nodes, nil
}
// MarkFullNode creates a full payload node for an existing empty node at the
// given beacon block root. This is used during forkchoice tree reconstruction on
// startup to mark blocks whose execution payload was delivered. The caller must
// hold the forkchoice write lock.
func (f *ForkChoice) MarkFullNode(root [32]byte) {
s := f.store
en := s.emptyNodeByRoot[root]
if en == nil {
return
}
if _, ok := s.fullNodeByRoot[root]; ok {
return
}
s.fullNodeByRoot[root] = &PayloadNode{
node: en.node,
optimistic: true,
timestamp: time.Now(),
full: true,
children: make([]*Node, 0),
}
}
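Before its removal, MarkFullNode paired with the HasPayload flag on BlockAndCheckpoints (also deleted in this diff) during startup reconstruction. A hedged sketch of that call pattern, with markFullNodes as a hypothetical helper:

// markFullNodes walks a reconstructed chain and marks every root whose
// execution payload was delivered as a full node.
func markFullNodes(fc *ForkChoice, chain []*forkchoicetypes.BlockAndCheckpoints) {
	for _, bcp := range chain {
		if bcp.HasPayload {
			fc.MarkFullNode(bcp.Block.Root())
		}
	}
}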
// InsertPayload inserts a full node into forkchoice after the Gloas fork.
func (f *ForkChoice) InsertPayload(pe interfaces.ROExecutionPayloadEnvelope) error {
if pe.IsNil() {

View File

@@ -79,7 +79,7 @@ func prepareGloasForkchoiceState(
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
}
st, err := state_native.InitializeFromProtoUnsafeGloas(base)

View File

@@ -112,5 +112,4 @@ type Setter interface {
SetBalancesByRooter(BalancesByRooter)
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
SetPTCVote(root [32]byte, ptcIdx uint64, payloadPresent, blobDataAvailable bool)
MarkFullNode(root [32]byte)
}

View File

@@ -20,5 +20,4 @@ type BlockAndCheckpoints struct {
Block consensus_blocks.ROBlock
JustifiedCheckpoint *ethpb.Checkpoint
FinalizedCheckpoint *ethpb.Checkpoint
HasPayload bool
}

View File

@@ -775,7 +775,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithDataColumnStorage(b.DataColumnStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithProposerPreferencesCache(b.proposerPreferencesCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),

View File

@@ -7,7 +7,6 @@ go_library(
"block.go",
"kv.go",
"log.go",
"metrics.go",
"seen_bits.go",
"unaggregated.go",
],
@@ -25,8 +24,6 @@ go_library(
"//runtime/version:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"fmt"
"runtime"
"sync"
@@ -274,29 +273,18 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
filtered := make([]ethpb.Att, 0)
for _, a := range attList {
contains, err := att.GetAggregationBits().Contains(a.GetAggregationBits())
if err != nil {
return fmt.Errorf("aggregation bits contain: %w", err)
if c, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
return err
} else if !c {
filtered = append(filtered, a)
}
if contains {
if err := c.insertSeenAggregatedAtt(a); err != nil {
return fmt.Errorf("insert aggregated att: %w", err)
}
continue
}
// If the attestation in the cache doesn't contain the bits of the attestation to delete, we keep it in the cache.
filtered = append(filtered, a)
}
if len(filtered) == 0 {
delete(c.aggregatedAtt, id)
return nil
} else {
c.aggregatedAtt[id] = filtered
}
c.aggregatedAtt[id] = filtered
return nil
}
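Both versions of DeleteAggregatedAttestation hinge on Bitlist.Contains, which reports whether every bit set in the argument is also set in the receiver and errors when the two bitlists have different lengths. A small self-contained illustration using the go-bitfield package:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// 0b1101: the high bit at index 3 is the length marker, so this is a
	// 3-bit list with participation bits 0 and 2 set.
	agg := bitfield.Bitlist{0b1101}
	// 0b1001: same length, only bit 0 set (a subset of agg).
	sub := bitfield.Bitlist{0b1001}

	ok, err := agg.Contains(sub)
	if err != nil {
		panic(err) // only returned when lengths differ
	}
	fmt.Println(ok) // true: every set bit of sub is also set in agg
}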
@@ -306,118 +294,32 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
return false, err
}
has, err := c.hasAggregatedAtt(att)
if err != nil {
return false, fmt.Errorf("has aggregated att: %w", err)
}
if has {
return true, nil
}
has, err = c.hasBlockAtt(att)
if err != nil {
return false, fmt.Errorf("has block att: %w", err)
}
if has {
return true, nil
}
has, err = c.hasSeenAggregatedAtt(att)
if err != nil {
return false, fmt.Errorf("has seen aggregated att: %w", err)
}
if has {
savedBySeenAggregatedCache.Inc()
return true, nil
}
return false, nil
}
// hasAggregatedAtt checks if the attestation bits are contained in the aggregated attestation cache.
func (c *AttCaches) hasAggregatedAtt(att ethpb.Att) (bool, error) {
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, fmt.Errorf("could not create attestation ID: %w", err)
return false, errors.Wrap(err, "could not create attestation ID")
}
c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
cacheAtts, ok := c.aggregatedAtt[id]
if !ok {
return false, nil
}
for _, cacheAtt := range cacheAtts {
contains, err := cacheAtt.GetAggregationBits().Contains(att.GetAggregationBits())
if err != nil {
return false, fmt.Errorf("aggregation bits contains: %w", err)
if atts, ok := c.aggregatedAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err
} else if c {
return true, nil
}
}
if contains {
return true, nil
}
}
return false, nil
}
// hasBlockAtt checks if the attestation bits are contained in the block attestation cache.
func (c *AttCaches) hasBlockAtt(att ethpb.Att) (bool, error) {
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, fmt.Errorf("could not create attestation ID: %w", err)
}
c.blockAttLock.RLock()
defer c.blockAttLock.RUnlock()
cacheAtts, ok := c.blockAtt[id]
if !ok {
return false, nil
}
for _, cacheAtt := range cacheAtts {
contains, err := cacheAtt.GetAggregationBits().Contains(att.GetAggregationBits())
if err != nil {
return false, fmt.Errorf("aggregation bits contains: %w", err)
}
if contains {
return true, nil
}
}
return false, nil
}
// hasSeenAggregatedAtt checks if the attestation bits are contained in the seen aggregated cache.
func (c *AttCaches) hasSeenAggregatedAtt(att ethpb.Att) (bool, error) {
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, fmt.Errorf("could not create attestation ID: %w", err)
}
c.seenAggregatedAttLock.RLock()
defer c.seenAggregatedAttLock.RUnlock()
cacheAtts, ok := c.seenAggregatedAtt[id]
if !ok {
return false, nil
}
for _, cacheAtt := range cacheAtts {
contains, err := cacheAtt.GetAggregationBits().Contains(att.GetAggregationBits())
if err != nil {
return false, fmt.Errorf("aggregation bits contains: %w", err)
}
if contains {
return true, nil
if atts, ok := c.blockAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err
} else if c {
return true, nil
}
}
}
@@ -430,58 +332,3 @@ func (c *AttCaches) AggregatedAttestationCount() int {
defer c.aggregatedAttLock.RUnlock()
return len(c.aggregatedAtt)
}
// insertSeenAggregatedAtt inserts an attestation into the seen aggregated cache.
func (c *AttCaches) insertSeenAggregatedAtt(att ethpb.Att) error {
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return fmt.Errorf("new ID: %w", err)
}
c.seenAggregatedAttLock.Lock()
defer c.seenAggregatedAttLock.Unlock()
cacheAtts, ok := c.seenAggregatedAtt[id]
if !ok {
c.seenAggregatedAtt[id] = []ethpb.Att{att.Clone()}
return nil
}
// Check if attestation is already contained
for _, cacheAtt := range cacheAtts {
contains, err := cacheAtt.GetAggregationBits().Contains(att.GetAggregationBits())
if err != nil {
return fmt.Errorf("aggregation bits contains: %w", err)
}
if contains {
return nil
}
}
c.seenAggregatedAtt[id] = append(cacheAtts, att.Clone())
return nil
}
// SeenAggregatedAttestationCount returns the number of keys in the seen aggregated cache.
func (c *AttCaches) SeenAggregatedAttestationCount() int {
c.seenAggregatedAttLock.RLock()
defer c.seenAggregatedAttLock.RUnlock()
return len(c.seenAggregatedAtt)
}
// DeleteSeenAggregatedAttestationsBefore deletes all attestations from the seen cache
// with a slot less than the provided slot.
func (c *AttCaches) DeleteSeenAggregatedAttestationsBefore(expirySlot primitives.Slot) {
c.seenAggregatedAttLock.Lock()
defer c.seenAggregatedAttLock.Unlock()
// The attestation ID contains the slot, so all attestations under the same ID
// share the same slot. We only need to check the first attestation's slot
// to determine whether to delete the entire entry.
for id, atts := range c.seenAggregatedAtt {
if len(atts) == 0 || atts[0].GetData().Slot < expirySlot {
delete(c.seenAggregatedAtt, id)
}
}
}

View File

@@ -544,209 +544,3 @@ func TestKV_Aggregated_AggregatedAttestationsBySlotIndexElectra(t *testing.T) {
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
}
func TestKV_SeenAggregated_Cache(t *testing.T) {
t.Run("insert on delete from aggregated cache", func(t *testing.T) {
cache := NewAttCaches()
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
// Save attestations
require.NoError(t, cache.SaveAggregatedAttestation(att1))
require.NoError(t, cache.SaveAggregatedAttestation(att2))
// Seen aggregated cache should be empty before deletion
assert.Equal(t, 0, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should be empty before deletion")
// Delete one attestation
require.NoError(t, cache.DeleteAggregatedAttestation(att1))
// Seen aggregated cache should now contain the deleted attestation
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one entry after deletion")
// The deleted attestation should be found via HasAggregatedAttestation (through seen aggregated cache)
has, err := cache.HasAggregatedAttestation(att1)
require.NoError(t, err)
assert.Equal(t, true, has, "Deleted attestation should be found in seen aggregated cache")
})
t.Run("has aggregated attestation via seen aggregated cache", func(t *testing.T) {
cache := NewAttCaches()
att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
// Save and delete attestation to populate seen aggregated cache
require.NoError(t, cache.SaveAggregatedAttestation(att))
require.NoError(t, cache.DeleteAggregatedAttestation(att))
// Attestation should not be in aggregated cache
assert.Equal(t, 0, cache.AggregatedAttestationCount(), "Aggregated cache should be empty")
// But should still be found via HasAggregatedAttestation (through seen aggregated cache)
has, err := cache.HasAggregatedAttestation(att)
require.NoError(t, err)
assert.Equal(t, true, has, "Attestation should be found in seen aggregated cache")
// Subset of bits should also be found
attSubset := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1100}})
has, err = cache.HasAggregatedAttestation(attSubset)
require.NoError(t, err)
assert.Equal(t, true, has, "Subset attestation should be found in seen aggregated cache")
})
t.Run("delete from seen aggregated cache", func(t *testing.T) {
cache := NewAttCaches()
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
// Save and delete attestations to populate seen aggregated cache
require.NoError(t, cache.SaveAggregatedAttestation(att1))
require.NoError(t, cache.SaveAggregatedAttestation(att2))
require.NoError(t, cache.DeleteAggregatedAttestation(att1))
require.NoError(t, cache.DeleteAggregatedAttestation(att2))
assert.Equal(t, 2, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have two entries")
// Delete attestations before slot 2 from seen aggregated cache
cache.DeleteSeenAggregatedAttestationsBefore(2)
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one entry after deletion")
// att1 should no longer be found (slot 1 < 2)
has, err := cache.HasAggregatedAttestation(att1)
require.NoError(t, err)
assert.Equal(t, false, has, "Deleted seen aggregated attestation should not be found")
// att2 should still be found (slot 2 >= 2)
has, err = cache.HasAggregatedAttestation(att2)
require.NoError(t, err)
assert.Equal(t, true, has, "Non-deleted seen aggregated attestation should still be found")
})
t.Run("insert on delete from block cache", func(t *testing.T) {
cache := NewAttCaches()
att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
// Save as block attestation
require.NoError(t, cache.SaveBlockAttestation(att))
// Seen aggregated cache should be empty before deletion
assert.Equal(t, 0, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should be empty before deletion")
// Delete block attestation
require.NoError(t, cache.DeleteBlockAttestation(att))
// Seen aggregated cache should now contain the deleted attestation
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one entry after block attestation deletion")
// The deleted attestation should be found via HasAggregatedAttestation (through seen aggregated cache)
has, err := cache.HasAggregatedAttestation(att)
require.NoError(t, err)
assert.Equal(t, true, has, "Deleted block attestation should be found in seen aggregated cache")
})
t.Run("no duplicates in seen aggregated cache", func(t *testing.T) {
cache := NewAttCaches()
att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
// Save and delete the same attestation multiple times
require.NoError(t, cache.SaveAggregatedAttestation(att))
require.NoError(t, cache.DeleteAggregatedAttestation(att))
require.NoError(t, cache.SaveAggregatedAttestation(att))
require.NoError(t, cache.DeleteAggregatedAttestation(att))
// Seen aggregated cache should only have one entry (no duplicates)
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should not have duplicates")
})
t.Run("multiple attestations with different bits for same data", func(t *testing.T) {
cache := NewAttCaches()
// Create attestations with the same data but non-overlapping aggregation bits
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b10011}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100}})
// Directly insert into seen aggregated cache to test the append path
require.NoError(t, cache.insertSeenAggregatedAtt(att1))
require.NoError(t, cache.insertSeenAggregatedAtt(att2))
// Seen aggregated cache should have one key with two attestations (since bits don't overlap)
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one key")
// Both should be found
has, err := cache.HasAggregatedAttestation(att1)
require.NoError(t, err)
assert.Equal(t, true, has, "First attestation should be found in seen aggregated cache")
has, err = cache.HasAggregatedAttestation(att2)
require.NoError(t, err)
assert.Equal(t, true, has, "Second attestation should be found in seen aggregated cache")
// A subset of att1 should be found
attSubset := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b10001}})
has, err = cache.HasAggregatedAttestation(attSubset)
require.NoError(t, err)
assert.Equal(t, true, has, "Subset of first attestation should be found in seen aggregated cache")
// An attestation with bits not contained in any cached attestation should not be found
attNotContained := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11111}})
has, err = cache.HasAggregatedAttestation(attNotContained)
require.NoError(t, err)
assert.Equal(t, false, has, "Attestation with bits not contained in cache should not be found")
})
t.Run("insert subset attestation into seen aggregated cache", func(t *testing.T) {
cache := NewAttCaches()
// Insert an attestation with some aggregation bits
att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11111}})
require.NoError(t, cache.insertSeenAggregatedAtt(att))
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one key")
// Try to insert a subset attestation (bits are contained in the existing attestation)
attSubset := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b10011}})
require.NoError(t, cache.insertSeenAggregatedAtt(attSubset))
// Cache should still have only one key (subset was not added)
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should still have one key after inserting subset")
// The subset should still be found via HasAggregatedAttestation (because original contains it)
has, err := cache.HasAggregatedAttestation(attSubset)
require.NoError(t, err)
assert.Equal(t, true, has, "Subset attestation should be found in seen aggregated cache")
})
t.Run("delete before slot from seen aggregated cache with same key", func(t *testing.T) {
cache := NewAttCaches()
// Create attestations with the same data but different slots
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b10011}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100}})
// Insert both into seen aggregated cache
require.NoError(t, cache.insertSeenAggregatedAtt(att1))
require.NoError(t, cache.insertSeenAggregatedAtt(att2))
// Verify both are in the cache (different keys due to different slots)
assert.Equal(t, 2, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have two keys")
// Delete attestations before slot 2 from seen aggregated cache
cache.DeleteSeenAggregatedAttestationsBefore(2)
// Only one key should remain
assert.Equal(t, 1, cache.SeenAggregatedAttestationCount(), "seen aggregated cache should have one key")
// att1 should no longer be found (slot 1 < 2)
has, err := cache.HasAggregatedAttestation(att1)
require.NoError(t, err)
assert.Equal(t, false, has, "Deleted attestation should not be found")
// att2 should still be found (slot 2 >= 2)
has, err = cache.HasAggregatedAttestation(att2)
require.NoError(t, err)
assert.Equal(t, true, has, "Remaining attestation should still be found")
})
}

View File

@@ -1,8 +1,6 @@
package kv
import (
"fmt"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
"github.com/pkg/errors"
@@ -65,16 +63,6 @@ func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
c.blockAttLock.Lock()
defer c.blockAttLock.Unlock()
// Insert all attestations into the seen aggregated cache before deleting
if cacheAtts, ok := c.blockAtt[id]; ok {
for _, cacheAtt := range cacheAtts {
if err := c.insertSeenAggregatedAtt(cacheAtt); err != nil {
return fmt.Errorf("insert seen aggregated att: %w", err)
}
}
}
delete(c.blockAtt, id)
return nil

View File

@@ -18,16 +18,14 @@ import (
// These caches are a KV store for various attestations,
// such as unaggregated, aggregated, or attestations within a block.
type AttCaches struct {
aggregatedAttLock sync.RWMutex
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAtt *attmap.Attestations
blockAttLock sync.RWMutex
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
seenAggregatedAttLock sync.RWMutex
seenAggregatedAtt map[attestation.Id][]ethpb.Att
aggregatedAttLock sync.RWMutex
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAtt *attmap.Attestations
blockAttLock sync.RWMutex
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
}
// NewAttCaches initializes a new attestation pool consisting of multiple KV stores in cache for
@@ -36,12 +34,11 @@ func NewAttCaches() *AttCaches {
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
c := cache.New(2*secsInEpoch*time.Second, 2*secsInEpoch*time.Second)
pool := &AttCaches{
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: attmap.New(),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
seenAggregatedAtt: make(map[attestation.Id][]ethpb.Att),
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: attmap.New(),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
}
return pool
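For the seenAtt field above, the pool leans on patrickmn/go-cache for TTL-based expiry instead of a manual pruning loop. A minimal sketch of that pattern (sketchSeenCache and the key are placeholders):

// sketchSeenCache shows the TTL cache pattern used for seenAtt: entries
// expire and are swept automatically by the cache's background janitor, and
// both durations mirror the two-epoch window used above.
func sketchSeenCache() {
	ttl := time.Duration(2*params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
	seen := cache.New(ttl, ttl)
	seen.SetDefault("attestation-id", struct{}{})
	if _, ok := seen.Get("attestation-id"); ok {
		// Seen within the last two epochs.
	}
}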

View File

@@ -1,11 +0,0 @@
package kv
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var savedBySeenAggregatedCache = promauto.NewCounter(prometheus.CounterOpts{
Name: "attestation_saved_by_seen_aggregated_cache_total",
Help: "The number of times an attestation was found only in the seen aggregated cache and not in the regular caches.",
})

View File

@@ -30,12 +30,6 @@ var (
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
seenAggregatedAttsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "seen_aggregated_attestations_in_pool_total",
Help: "The number of aggregated attestations in the seen cache.",
},
)
attCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "attestations_in_pool_total",
@@ -65,7 +59,6 @@ var (
func (s *Service) updateMetrics() {
aggregatedAttsCount.Set(float64(s.cfg.Pool.AggregatedAttestationCount()))
unaggregatedAttsCount.Set(float64(s.cfg.Pool.UnaggregatedAttestationCount()))
seenAggregatedAttsCount.Set(float64(s.cfg.Pool.SeenAggregatedAttestationCount()))
}
func (s *Service) updateMetricsExperimental(numExpired uint64) {

View File

@@ -68,16 +68,6 @@ func (*PoolMock) AggregatedAttestationCount() int {
panic("implement me")
}
// DeleteSeenAggregatedAttestationsBefore --
func (*PoolMock) DeleteSeenAggregatedAttestationsBefore(_ primitives.Slot) {
panic("implement me")
}
// SeenAggregatedAttestationCount --
func (*PoolMock) SeenAggregatedAttestationCount() int {
panic("implement me")
}
// SaveUnaggregatedAttestation --
func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
panic("implement me")

View File

@@ -23,9 +23,6 @@ type Pool interface {
DeleteAggregatedAttestation(att ethpb.Att) error
HasAggregatedAttestation(att ethpb.Att) (bool, error)
AggregatedAttestationCount() int
// Seen aggregated attestations.
DeleteSeenAggregatedAttestationsBefore(expirySlot primitives.Slot)
SeenAggregatedAttestationCount() int
// For unaggregated attestations.
SaveUnaggregatedAttestation(att ethpb.Att) error
SaveUnaggregatedAttestations(atts []ethpb.Att) error

View File

@@ -81,14 +81,6 @@ func (s *Service) pruneExpiredAtts() {
expiredBlockAtts.Inc()
}
}
expirySlot, err := s.expirySlot()
if err != nil {
log.WithError(err).Error("Could not get expiry slot for seen aggregated attestations")
return
}
s.cfg.Pool.DeleteSeenAggregatedAttestationsBefore(expirySlot)
}
// Return true if the input slot has expired.

View File

@@ -200,7 +200,6 @@ go_test(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",

View File

@@ -64,31 +64,6 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
}
// BroadcastForEpoch broadcasts a message using the fork digest for the given epoch.
// Use this when the target epoch's fork digest differs from the current one,
// e.g. broadcasting proposer preferences in the epoch before gloas activation.
func (s *Service) BroadcastForEpoch(ctx context.Context, msg proto.Message, epoch primitives.Epoch) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastForEpoch")
defer span.End()
twoSlots := time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, twoSlots)
defer cancel()
forkDigest := params.ForkDigest(epoch)
topic, ok := GossipTypeMapping[reflect.TypeOf(msg)]
if !ok {
tracing.AnnotateError(span, ErrMessageNotMapped)
return ErrMessageNotMapped
}
castMsg, ok := msg.(ssz.Marshaler)
if !ok {
return errors.Errorf("message of %T does not support marshaller interface", msg)
}
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
}
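Before its removal, the intended use of BroadcastForEpoch was the fork-boundary case called out in its comment: publish under the upcoming epoch's fork digest rather than the current one. A hedged usage sketch (broadcastForNextFork is hypothetical; Broadcaster is the pre-removal interface that still declared BroadcastForEpoch):

// broadcastForNextFork publishes under the next epoch's fork digest when that
// epoch crosses the gloas activation boundary, and normally otherwise.
func broadcastForNextFork(ctx context.Context, b Broadcaster, msg proto.Message, current primitives.Epoch) error {
	next := current + 1
	if next == params.BeaconConfig().GloasForkEpoch {
		return b.BroadcastForEpoch(ctx, msg, next)
	}
	return b.Broadcast(ctx, msg)
}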
// BroadcastAttestation broadcasts an attestation to the p2p network; the message is assumed to be
// broadcast to the current fork.
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error {
@@ -398,7 +373,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index())
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
topic = dataColumnSubnetToTopic(subnet, forkDigest)
wrappedSubIdx = subnet + dataColumnSubnetVal
return
@@ -438,7 +413,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
topic, _, _ := topicFunc(sidecar)
if err := s.batchObject(ctx, &messageBatch, &sidecar, topic); err != nil {
if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot batch data column sidecar")
return
@@ -446,7 +421,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index()}, time.Now())
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
}
})
}
@@ -468,7 +443,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
}
// Publish individually (not batched) since we just found peers.
if err := s.broadcastObject(ctx, &sidecar, topic); err != nil {
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot broadcast data column sidecar")
return
@@ -478,7 +453,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index()}, time.Now())
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
}
})
}

View File

@@ -315,12 +315,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
}
}()
ps1Tracer := p2ptest.NewGossipTracer()
ps1, err := pubsub.NewGossipSub(t.Context(), hosts[0],
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
pubsub.WithRawTracer(ps1Tracer),
)
require.NoError(t, err)
@@ -372,17 +369,33 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
_, err = ps1Tracer.JoinAndWatchTopic(t.Context(), topic, p)
require.NoError(t, err)
// We don't use our internal subscribe method
// because we use floodsub here.
tpHandle, err := p2.JoinTopic(topic)
require.NoError(t, err)
sub, err := tpHandle.Subscribe()
require.NoError(t, err)
// Block until gossipsub is ready to deliver a published message to p2.
require.NoError(t, ps1Tracer.CanPublishToPeer(t.Context(), topic, p2.PeerID()))
tpHandle, err = p.JoinTopic(topic)
require.NoError(t, err)
_, err = tpHandle.Subscribe()
require.NoError(t, err)
// This test specifically exercises discovery-based peer finding, which requires
// time for nodes to discover each other. Using a fixed sleep here is intentional,
// as we're testing the discovery timing behavior.
time.Sleep(500 * time.Millisecond)
// Verify mesh establishment after discovery
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0 && len(p2.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
nodePeers := p.pubsub.ListPeers(topic)
nodePeers2 := p2.pubsub.ListPeers(topic)
assert.Equal(t, 1, len(nodePeers))
assert.Equal(t, 1, len(nodePeers2))
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -393,10 +406,10 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
defer cancel()
incomingMessage, err := sub.Next(ctx)
require.NoError(tt, err)
require.NoError(t, err)
result := &ethpb.Attestation{}
require.NoError(tt, p.Encoding().DecodeGossip(incomingMessage.Data, result))
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
if !proto.Equal(result, msg) {
tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg)
}
@@ -802,7 +815,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
var result ethpb.DataColumnSidecar
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
require.DeepEqual(t, &result, verifiedRoSidecar.DataColumnSidecar())
require.DeepEqual(t, &result, verifiedRoSidecar)
}
type topicInvoked struct {

View File

@@ -186,6 +186,11 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Allow the bootnode's table to complete its initial refresh so that
// inbound nodes can be added.
time.Sleep(5 * time.Second)
bootNode := bootListener.Self()
var listeners []*listenerWrapper
@@ -222,13 +227,15 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}
}()
var nodes []*enode.Node
// Wait for the nodes' local routing tables to be populated with the other nodes.
time.Sleep(discoveryWaitTime)
lastListener := listeners[len(listeners)-1]
require.Eventually(t, func() bool {
nodes = lastListener.Lookup(bootNode.ID())
return len(nodes) > 4
}, 10*time.Second, 100*time.Millisecond, fmt.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes)))
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}
}
func TestCreateLocalNode(t *testing.T) {

View File

@@ -52,9 +52,7 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// signedProposerPreferencesWeight specifies the scoring weight that we apply to
// our signed proposer preferences topic.
signedProposerPreferencesWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
// maxFirstDeliveryScore describes the max score a peer can obtain from first deliveries.
@@ -153,9 +151,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
case strings.Contains(topic, GossipExecutionPayloadEnvelopeMessage):
// TODO: Revisit scoring params for execution payload envelope gossip.
return defaultBlockTopicParams(), nil
case strings.Contains(topic, GossipSignedProposerPreferencesMessage):
// TODO: Revisit scoring params for signed proposer preferences gossip.
return defaultBlockTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}

View File

@@ -92,11 +92,6 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
return &ethpb.LightClientFinalityUpdateCapella{}
}
return gossipMessage(topic)
case DataColumnSubnetTopicFormat:
if epoch >= params.BeaconConfig().GloasForkEpoch {
return &ethpb.DataColumnSidecarGloas{}
}
return gossipMessage(topic)
default:
return gossipMessage(topic)
}
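GossipTopicMappings returns a template message whose concrete type depends on both the topic and the epoch, so decoders should clone it rather than unmarshal into the shared value. A usage sketch (messageForTopic is hypothetical):

// messageForTopic resolves the expected message type for a topic at an epoch
// and returns a fresh instance, so the shared template is never mutated.
func messageForTopic(topic string, epoch primitives.Epoch) (proto.Message, error) {
	template := GossipTopicMappings(topic, epoch)
	if template == nil {
		return nil, errors.Errorf("no gossip message mapped for topic %s", topic)
	}
	return proto.Clone(template), nil
}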
@@ -158,7 +153,6 @@ func init() {
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockFulu]()] = BlockSubnetTopicFormat
// Specially handle Gloas objects.
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockGloas]()] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.DataColumnSidecarGloas]()] = DataColumnSubnetTopicFormat
// Payload attestation messages.
GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()] = PayloadAttestationMessageTopicFormat

View File

@@ -47,7 +47,6 @@ type (
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastForEpoch(context.Context, proto.Message, primitives.Epoch) error
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error

View File

@@ -24,7 +24,6 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
"github.com/multiformats/go-multiaddr"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -36,12 +35,7 @@ func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP)
ipAddr := net.ParseIP("127.0.0.1")
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
require.NoError(t, err, "Failed to p2p listen")
h, err := libp2p.New([]libp2p.Option{
privKeyOption(pkey),
libp2p.ListenAddrs(listen),
libp2p.Security(noise.ID, noise.New),
libp2p.Transport(libp2ptcp.NewTCPTransport, libp2ptcp.DisableReuseport()),
}...)
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
require.NoError(t, err)
return h, pkey, ipAddr
}

View File

@@ -5,8 +5,6 @@ go_library(
testonly = True,
srcs = [
"fuzz_p2p.go",
"gossiptracer.go",
"log.go",
"mock_broadcaster.go",
"mock_host.go",
"mock_listener.go",

View File

@@ -138,11 +138,6 @@ func (*FakeP2P) Disconnect(_ peer.ID) error {
return nil
}
// BroadcastForEpoch -- fake.
func (*FakeP2P) BroadcastForEpoch(_ context.Context, _ proto.Message, _ primitives.Epoch) error {
return nil
}
// Broadcast -- fake.
func (*FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
return nil

View File

@@ -1,298 +0,0 @@
package testing
import (
"context"
"errors"
"fmt"
"sync"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type topicPeer struct {
topic string
peer peer.ID
}
// GossipTracer implements pubsub.RawTracer for use in tests. It allows callers
// to block until specific gossipsub-internal events have fired, which is useful
// for avoiding races between the various maps maintained by the pubsub event loop.
//
// Individual methods (RemovePeer, Prune, ValidateMessage, etc.) can be extended
// as needed by future tests.
type GossipTracer struct {
mu sync.Mutex
addPeerWaiters map[peer.ID]chan struct{}
addedPeers map[peer.ID]bool
joinedTopics map[string]bool
graftedPeers map[topicPeer]bool
graftWaiters map[topicPeer]chan struct{}
topicWaiters map[string]*topicEventWaiter
}
// NewGossipTracer returns a new tracer ready for use. Pass it to
// pubsub.NewGossipSub via pubsub.WithRawTracer(tracer).
func NewGossipTracer() *GossipTracer {
return &GossipTracer{
addPeerWaiters: make(map[peer.ID]chan struct{}),
addedPeers: make(map[peer.ID]bool),
joinedTopics: make(map[string]bool),
graftedPeers: make(map[topicPeer]bool),
graftWaiters: make(map[topicPeer]chan struct{}),
topicWaiters: make(map[string]*topicEventWaiter),
}
}
func (t *GossipTracer) waitForAddPeer(ctx context.Context, pid peer.ID) error {
t.mu.Lock()
if t.addedPeers[pid] {
t.mu.Unlock()
return nil
}
ch, ok := t.addPeerWaiters[pid]
if !ok {
ch = make(chan struct{})
t.addPeerWaiters[pid] = ch
}
t.mu.Unlock()
select {
case <-ch:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (t *GossipTracer) waitForGraft(ctx context.Context, topic string, pid peer.ID) error {
key := topicPeer{topic: topic, peer: pid}
t.mu.Lock()
if t.graftedPeers[key] {
t.mu.Unlock()
return nil
}
ch, ok := t.graftWaiters[key]
if !ok {
ch = make(chan struct{})
t.graftWaiters[key] = ch
}
t.mu.Unlock()
select {
case <-ch:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (t *GossipTracer) isSubscribed(topic string) bool {
t.mu.Lock()
defer t.mu.Unlock()
return t.joinedTopics[topic]
}
// CanPublishToPeer blocks until the gossipsub event loop is in a state where
// publishing a message on the given topic will successfully reach pid.
//
// The conditions depend on whether we have locally subscribed to the topic:
// - Subscribed (mesh path): waits until pid has been grafted into our mesh
// for the topic.
// - Not subscribed (fanout path): waits until both PeerJoin (pid is in
// p.topics[topic]) and AddPeer (pid is in p.peers with an rpcQueue) have
// fired.
//
// Note: you must call 'JoinAndWatchTopic' before calling this method.
func (t *GossipTracer) CanPublishToPeer(ctx context.Context, topic string, pid peer.ID) error {
if t.isSubscribed(topic) {
return t.waitForGraft(ctx, topic, pid)
}
// Fanout path: need both PeerJoin and AddPeer.
w := t.getTopicWaiter(topic)
if w == nil {
return errors.New("topic waiter not found, please call JoinAndWatchTopic first")
}
if err := w.waitForPeerJoin(ctx, pid); err != nil {
return fmt.Errorf("wait for peer join: %w", err)
}
if err := t.waitForAddPeer(ctx, pid); err != nil {
return fmt.Errorf("wait for add peer: %w", err)
}
return nil
}
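A usage sketch tying the tracer's pieces together (waitAndPublish is hypothetical; svc is anything satisfying the topicJoiner interface defined just below, such as the p2p Service):

func waitAndPublish(ctx context.Context, t *GossipTracer, svc topicJoiner, topic string, pid peer.ID, data []byte) error {
	th, err := t.JoinAndWatchTopic(ctx, topic, svc)
	if err != nil {
		return fmt.Errorf("join and watch topic: %w", err)
	}
	// Block until gossipsub can actually route a publish to pid, avoiding
	// the publish-before-graft race this tracer exists to eliminate.
	if err := t.CanPublishToPeer(ctx, topic, pid); err != nil {
		return fmt.Errorf("wait for publishable state: %w", err)
	}
	return th.Publish(ctx, data)
}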
// topicEventWaiter tracks PeerJoin/PeerLeave events for a single topic.
type topicEventWaiter struct {
mu sync.Mutex
joined map[peer.ID]struct{}
waiters map[peer.ID]chan struct{}
}
type topicJoiner interface {
JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error)
}
func (t *GossipTracer) JoinAndWatchTopic(ctx context.Context, topic string, joiner topicJoiner) (*pubsub.Topic, error) {
topicHandle, err := joiner.JoinTopic(topic)
if err != nil {
return nil, fmt.Errorf("join topic: %w", err)
}
if err := t.watchTopic(ctx, topicHandle); err != nil {
return nil, fmt.Errorf("watch topic: %w", err)
}
return topicHandle, nil
}
func (t *GossipTracer) watchTopic(ctx context.Context, topicHandle *pubsub.Topic) error {
ev, err := topicHandle.EventHandler()
if err != nil {
return fmt.Errorf("event handler: %w", err)
}
w := &topicEventWaiter{
joined: make(map[peer.ID]struct{}),
waiters: make(map[peer.ID]chan struct{}),
}
// Register the waiter so CanPublishToPeer can find it.
t.mu.Lock()
defer t.mu.Unlock()
t.topicWaiters[topicHandle.String()] = w
go func() {
defer ev.Cancel()
for {
pe, err := ev.NextPeerEvent(ctx)
if err != nil {
if ctx.Err() == nil {
log.WithError(err).Debug("NextPeerEvent failed")
}
return
}
if pe.Type == pubsub.PeerJoin {
w.handlePeerJoin(pe.Peer)
}
}
}()
return nil
}
func (t *GossipTracer) getTopicWaiter(topic string) *topicEventWaiter {
t.mu.Lock()
defer t.mu.Unlock()
return t.topicWaiters[topic]
}
func (w *topicEventWaiter) handlePeerJoin(pid peer.ID) {
w.mu.Lock()
defer w.mu.Unlock()
w.joined[pid] = struct{}{}
if ch, ok := w.waiters[pid]; ok {
close(ch)
delete(w.waiters, pid)
}
}
func (w *topicEventWaiter) waitForPeerJoin(ctx context.Context, pid peer.ID) error {
w.mu.Lock()
if _, ok := w.joined[pid]; ok {
w.mu.Unlock()
return nil
}
ch, ok := w.waiters[pid]
if !ok {
ch = make(chan struct{})
w.waiters[pid] = ch
}
w.mu.Unlock()
select {
case <-ch:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// --- pubsub.RawTracer implementation ---
// AddPeer is invoked by the gossipsub event loop after a peer has been fully
// registered in p.peers (i.e., it has an rpcQueue and an outbound stream).
func (t *GossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
t.mu.Lock()
defer t.mu.Unlock()
t.addedPeers[p] = true
if ch, ok := t.addPeerWaiters[p]; ok {
close(ch)
delete(t.addPeerWaiters, p)
}
}
// RemovePeer can be extended by future tests to track peer removal.
func (t *GossipTracer) RemovePeer(p peer.ID) {}
// Join is invoked when we locally subscribe to a topic (a mesh is created).
func (t *GossipTracer) Join(topic string) {
t.mu.Lock()
defer t.mu.Unlock()
t.joinedTopics[topic] = true
}
// Leave is invoked when we unsubscribe from a topic (mesh is torn down).
func (t *GossipTracer) Leave(topic string) {
t.mu.Lock()
defer t.mu.Unlock()
delete(t.joinedTopics, topic)
}
// Graft is invoked when a peer is added to our mesh for a topic.
func (t *GossipTracer) Graft(p peer.ID, topic string) {
t.mu.Lock()
defer t.mu.Unlock()
key := topicPeer{topic: topic, peer: p}
t.graftedPeers[key] = true
if ch, ok := t.graftWaiters[key]; ok {
close(ch)
delete(t.graftWaiters, key)
}
}
// Prune can be extended by future tests to track mesh prunes.
func (t *GossipTracer) Prune(p peer.ID, topic string) {}
// ValidateMessage can be extended by future tests to track message validation.
func (t *GossipTracer) ValidateMessage(msg *pubsub.Message) {}
// DeliverMessage can be extended by future tests to track message delivery.
func (t *GossipTracer) DeliverMessage(msg *pubsub.Message) {}
// RejectMessage can be extended by future tests to track message rejection.
func (t *GossipTracer) RejectMessage(msg *pubsub.Message, reason string) {}
// DuplicateMessage can be extended by future tests to track duplicate messages.
func (t *GossipTracer) DuplicateMessage(msg *pubsub.Message) {}
// ThrottlePeer can be extended by future tests to track peer throttling.
func (t *GossipTracer) ThrottlePeer(p peer.ID) {}
// RecvRPC can be extended by future tests to track incoming RPCs.
func (t *GossipTracer) RecvRPC(rpc *pubsub.RPC) {}
// SendRPC can be extended by future tests to track outgoing RPCs.
func (t *GossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {}
// DropRPC can be extended by future tests to track dropped RPCs.
func (t *GossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {}
// UndeliverableMessage can be extended by future tests to track undeliverable messages.
func (t *GossipTracer) UndeliverableMessage(msg *pubsub.Message) {}

View File

@@ -1,5 +0,0 @@
package testing
import "github.com/sirupsen/logrus"
var log = logrus.WithField("package", "beacon-chain/p2p/testing")

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
@@ -16,7 +15,6 @@ import (
type MockBroadcaster struct {
BroadcastCalled atomic.Bool
BroadcastMessages []proto.Message
BroadcastEpochs []primitives.Epoch
BroadcastAttestations []ethpb.Att
msgLock sync.Mutex
attLock sync.Mutex
@@ -31,14 +29,6 @@ func (m *MockBroadcaster) Broadcast(_ context.Context, msg proto.Message) error
return nil
}
// BroadcastForEpoch records that a broadcast occurred with the target epoch.
func (m *MockBroadcaster) BroadcastForEpoch(ctx context.Context, msg proto.Message, epoch primitives.Epoch) error {
m.msgLock.Lock()
m.BroadcastEpochs = append(m.BroadcastEpochs, epoch)
m.msgLock.Unlock()
return m.Broadcast(ctx, msg)
}
// BroadcastAttestation records that a broadcast occurred.
func (m *MockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, a ethpb.Att) error {
m.BroadcastCalled.Store(true)

Some files were not shown because too many files have changed in this diff.