Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-04-19 03:01:06 -04:00)

Compare commits: payloadCom...testing-e2 (53 commits)
SHA1:
32ef35c6ac, bc211b9b33, 9aa109d65a, 87debbc35e, 967d0cca4b, 474de75942,
8c7f35dd8b, a955f762eb, 4a0b986581, 89909b18a9, 6999943c3d, 90064edd54,
393eb1e83c, d2bcf75c50, 89d3a6c66f, bf2485eb71, a88f60f1fa, 33f899506f,
f7ead02e6e, 0abf17f6cd, 8307ee1098, 7e4a039a7f, d46621eea3, 8b25ffaa45,
17f1b78494, 5874226067, 544bc3eb45, de6d9947d7, e5382d95dd, 6d8445d440,
7b16c34af3, 91577760b6, 9ced048510, 0439151373, 5f050566f4, 8a43513f5a,
6c8504ef71, e99f86ecc5, 0c36416677, f2db0faea8, e31f59e04e, 83a5210549,
0aa4a08b7b, 9c5d4a1767, dd1ede572d, ec7eb97d41, 0c893bd1a6, 9a7cae9e5c,
c114bc57d9, a9aea0ba25, 9f2ade53ff, 5b19916067, e4cbb34c2f
@@ -72,7 +72,6 @@ exceptions:
  - CONTRIBUTION_DUE_BPS_GLOAS#gloas
  - GLOAS_FORK_EPOCH#gloas
  - GLOAS_FORK_VERSION#gloas
  - MAX_REQUEST_PAYLOADS#gloas
  - MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
  - PAYLOAD_ATTESTATION_DUE_BPS#gloas
  - SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
.github/workflows/clang-format.yml (vendored, 16 changes)

@@ -1,6 +1,6 @@
 name: Protobuf Format

 on:
   push:
     branches: [ '*' ]
   pull_request:
@@ -12,10 +12,14 @@ jobs:
   clang-format-checking:
     runs-on: ubuntu-4
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v6
       # Is this step failing for you?
       # Run: clang-format -i proto/**/*.proto
       # See: https://clang.llvm.org/docs/ClangFormat.html
-      - uses: RafikFarhad/clang-format-github-action@v3
-        with:
-          sources: "proto/**/*.proto"
+      - name: Install clang-format
+        run: |
+          sudo apt-get update -qq
+          sudo apt-get install -y clang-format
+      - name: Check protobuf formatting
+        run: |
+          clang-format --style=LLVM --dry-run --Werror proto/**/*.proto
.gitignore (vendored, 3 changes)

@@ -44,6 +44,3 @@ tmp

 # spectest coverage reports
 report.txt
-
-# execution client data
-execution/
@@ -92,7 +92,11 @@ func AcceptEncodingHeaderHandler() Middleware {
 		return
 	}

-	gz := gzip.NewWriter(w)
+	gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
+	if err != nil {
+		next.ServeHTTP(w, r)
+		return
+	}
 	gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
 	defer func() {
 		if !gzipRW.zip {
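The change above swaps gzip's default compression level for gzip.BestSpeed and falls back to the uncompressed handler if the writer cannot be constructed. Below is a standalone sketch of that pattern; the handler and writer types are simplified stand-ins, not Prysm's actual middleware:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

type gzipWriter struct {
	http.ResponseWriter
	gz *gzip.Writer
}

func (g gzipWriter) Write(b []byte) (int, error) { return g.gz.Write(b) }

// gzipHandler compresses responses when the client accepts gzip. BestSpeed
// trades compression ratio for CPU time, which suits latency-sensitive API
// responses; NewWriterLevel only errors on an out-of-range level, but
// handling it keeps the endpoint serving either way.
func gzipHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			next.ServeHTTP(w, r)
			return
		}
		gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
		if err != nil {
			next.ServeHTTP(w, r)
			return
		}
		defer gz.Close()
		w.Header().Set("Content-Encoding", "gzip")
		next.ServeHTTP(gzipWriter{ResponseWriter: w, gz: gz}, r)
	})
}

func main() {
	h := gzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "hello")
	}))
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Header().Get("Content-Encoding")) // gzip
}
```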
@@ -9,6 +9,7 @@ go_library(
         "conversions_blob.go",
         "conversions_block.go",
         "conversions_block_execution.go",
+        "conversions_gloas.go",
         "conversions_lightclient.go",
         "conversions_state.go",
         "endpoints_beacon.go",
@@ -57,10 +58,12 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
         "//consensus-types/blocks:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
     ],
@@ -577,3 +577,17 @@ func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
 func (s *SignedBeaconBlockGloas) SigString() string {
 	return s.Signature
 }

type ExecutionPayloadEnvelope struct {
	Payload           *ExecutionPayloadDeneb `json:"payload"`
	ExecutionRequests *ExecutionRequests     `json:"execution_requests"`
	BuilderIndex      string                 `json:"builder_index"`
	BeaconBlockRoot   string                 `json:"beacon_block_root"`
	Slot              string                 `json:"slot"`
	StateRoot         string                 `json:"state_root"`
}

type SignedExecutionPayloadEnvelope struct {
	Message   *ExecutionPayloadEnvelope `json:"message"`
	Signature string                    `json:"signature"`
}
@@ -3275,3 +3275,26 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
 		BlobDataAvailable: d.BlobDataAvailable,
 	}, nil
 }

// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
	payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
	if err != nil {
		return nil, err
	}
	var requests *ExecutionRequests
	if e.Message.ExecutionRequests != nil {
		requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
	}
	return &SignedExecutionPayloadEnvelope{
		Message: &ExecutionPayloadEnvelope{
			Payload:           payload,
			ExecutionRequests: requests,
			BuilderIndex:      fmt.Sprintf("%d", e.Message.BuilderIndex),
			BeaconBlockRoot:   hexutil.Encode(e.Message.BeaconBlockRoot),
			Slot:              fmt.Sprintf("%d", e.Message.Slot),
			StateRoot:         hexutil.Encode(e.Message.StateRoot),
		},
		Signature: hexutil.Encode(e.Signature),
	}, nil
}
api/server/structs/conversions_gloas.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package structs

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
	if b == nil {
		return nil
	}

	pbh := b.ParentBlockHash()
	pbr := b.ParentBlockRoot()
	bh := b.BlockHash()
	pr := b.PrevRandao()
	fr := b.FeeRecipient()
	commitments := b.BlobKzgCommitments()
	blobKzgCommitments := make([]string, 0, len(commitments))
	for _, commitment := range commitments {
		blobKzgCommitments = append(blobKzgCommitments, hexutil.Encode(commitment))
	}
	return &ExecutionPayloadBid{
		ParentBlockHash:    hexutil.Encode(pbh[:]),
		ParentBlockRoot:    hexutil.Encode(pbr[:]),
		BlockHash:          hexutil.Encode(bh[:]),
		PrevRandao:         hexutil.Encode(pr[:]),
		FeeRecipient:       hexutil.Encode(fr[:]),
		GasLimit:           fmt.Sprintf("%d", b.GasLimit()),
		BuilderIndex:       fmt.Sprintf("%d", b.BuilderIndex()),
		Slot:               fmt.Sprintf("%d", b.Slot()),
		Value:              fmt.Sprintf("%d", b.Value()),
		ExecutionPayment:   fmt.Sprintf("%d", b.ExecutionPayment()),
		BlobKzgCommitments: blobKzgCommitments,
	}
}

func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
	newBuilders := make([]*Builder, len(builders))
	for i, b := range builders {
		newBuilders[i] = BuilderFromConsensus(b)
	}
	return newBuilders
}

func BuilderFromConsensus(b *ethpb.Builder) *Builder {
	return &Builder{
		Pubkey:            hexutil.Encode(b.Pubkey),
		Version:           hexutil.Encode(b.Version),
		ExecutionAddress:  hexutil.Encode(b.ExecutionAddress),
		Balance:           fmt.Sprintf("%d", b.Balance),
		DepositEpoch:      fmt.Sprintf("%d", b.DepositEpoch),
		WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
	}
}

func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
	newPayments := make([]*BuilderPendingPayment, len(payments))
	for i, p := range payments {
		newPayments[i] = BuilderPendingPaymentFromConsensus(p)
	}
	return newPayments
}

func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
	return &BuilderPendingPayment{
		Weight:     fmt.Sprintf("%d", p.Weight),
		Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
	}
}

func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
	newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
	for i, w := range withdrawals {
		newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
	}
	return newWithdrawals
}

func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
	return &BuilderPendingWithdrawal{
		FeeRecipient: hexutil.Encode(w.FeeRecipient),
		Amount:       fmt.Sprintf("%d", w.Amount),
		BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
	}
}
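All of these converters follow the beacon-API JSON conventions: unsigned integers become decimal strings and byte values become 0x-prefixed hex strings. A minimal sketch of what a converted Builder marshals to (the field values are illustrative placeholders, not real keys or addresses):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Builder mirrors the API struct from this change: every uint64 is carried
// as a decimal string and every byte slice as 0x-prefixed hex.
type Builder struct {
	Pubkey            string `json:"pubkey"`
	Version           string `json:"version"`
	ExecutionAddress  string `json:"execution_address"`
	Balance           string `json:"balance"`
	DepositEpoch      string `json:"deposit_epoch"`
	WithdrawableEpoch string `json:"withdrawable_epoch"`
}

func main() {
	b := Builder{
		Pubkey:            "0xaaaa", // truncated placeholder
		Version:           "0x01010101",
		ExecutionAddress:  "0xbbbb", // truncated placeholder
		Balance:           "42",
		DepositEpoch:      "3",
		WithdrawableEpoch: "4",
	}
	out, _ := json.MarshalIndent(b, "", "  ")
	fmt.Println(string(out))
}
```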
@@ -972,3 +972,223 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
 		ProposerLookahead: lookahead,
 	}, nil
 }

// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------

func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
	srcBr := st.BlockRoots()
	br := make([]string, len(srcBr))
	for i, r := range srcBr {
		br[i] = hexutil.Encode(r)
	}
	srcSr := st.StateRoots()
	sr := make([]string, len(srcSr))
	for i, r := range srcSr {
		sr[i] = hexutil.Encode(r)
	}
	srcHr := st.HistoricalRoots()
	hr := make([]string, len(srcHr))
	for i, r := range srcHr {
		hr[i] = hexutil.Encode(r)
	}
	srcVotes := st.Eth1DataVotes()
	votes := make([]*Eth1Data, len(srcVotes))
	for i, e := range srcVotes {
		votes[i] = Eth1DataFromConsensus(e)
	}
	srcVals := st.Validators()
	vals := make([]*Validator, len(srcVals))
	for i, v := range srcVals {
		vals[i] = ValidatorFromConsensus(v)
	}
	srcBals := st.Balances()
	bals := make([]string, len(srcBals))
	for i, b := range srcBals {
		bals[i] = fmt.Sprintf("%d", b)
	}
	srcRm := st.RandaoMixes()
	rm := make([]string, len(srcRm))
	for i, m := range srcRm {
		rm[i] = hexutil.Encode(m)
	}
	srcSlashings := st.Slashings()
	slashings := make([]string, len(srcSlashings))
	for i, s := range srcSlashings {
		slashings[i] = fmt.Sprintf("%d", s)
	}
	srcPrevPart, err := st.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	prevPart := make([]string, len(srcPrevPart))
	for i, p := range srcPrevPart {
		prevPart[i] = fmt.Sprintf("%d", p)
	}
	srcCurrPart, err := st.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	currPart := make([]string, len(srcCurrPart))
	for i, p := range srcCurrPart {
		currPart[i] = fmt.Sprintf("%d", p)
	}
	srcIs, err := st.InactivityScores()
	if err != nil {
		return nil, err
	}
	is := make([]string, len(srcIs))
	for i, s := range srcIs {
		is[i] = fmt.Sprintf("%d", s)
	}
	currSc, err := st.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSc, err := st.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	srcHs, err := st.HistoricalSummaries()
	if err != nil {
		return nil, err
	}
	hs := make([]*HistoricalSummary, len(srcHs))
	for i, s := range srcHs {
		hs[i] = HistoricalSummaryFromConsensus(s)
	}
	nwi, err := st.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	nwvi, err := st.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	drsi, err := st.DepositRequestsStartIndex()
	if err != nil {
		return nil, err
	}
	dbtc, err := st.DepositBalanceToConsume()
	if err != nil {
		return nil, err
	}
	ebtc, err := st.ExitBalanceToConsume()
	if err != nil {
		return nil, err
	}
	eee, err := st.EarliestExitEpoch()
	if err != nil {
		return nil, err
	}
	cbtc, err := st.ConsolidationBalanceToConsume()
	if err != nil {
		return nil, err
	}
	ece, err := st.EarliestConsolidationEpoch()
	if err != nil {
		return nil, err
	}
	pbd, err := st.PendingDeposits()
	if err != nil {
		return nil, err
	}
	ppw, err := st.PendingPartialWithdrawals()
	if err != nil {
		return nil, err
	}
	pc, err := st.PendingConsolidations()
	if err != nil {
		return nil, err
	}
	srcLookahead, err := st.ProposerLookahead()
	if err != nil {
		return nil, err
	}
	lookahead := make([]string, len(srcLookahead))
	for i, v := range srcLookahead {
		lookahead[i] = fmt.Sprintf("%d", uint64(v))
	}
	// Gloas-specific fields
	lepb, err := st.LatestExecutionPayloadBid()
	if err != nil {
		return nil, err
	}
	builders, err := st.Builders()
	if err != nil {
		return nil, err
	}
	nwbi, err := st.NextWithdrawalBuilderIndex()
	if err != nil {
		return nil, err
	}
	epa, err := st.ExecutionPayloadAvailabilityVector()
	if err != nil {
		return nil, err
	}
	bpp, err := st.BuilderPendingPayments()
	if err != nil {
		return nil, err
	}
	bpw, err := st.BuilderPendingWithdrawals()
	if err != nil {
		return nil, err
	}
	lbh, err := st.LatestBlockHash()
	if err != nil {
		return nil, err
	}
	pew, err := st.PayloadExpectedWithdrawals()
	if err != nil {
		return nil, err
	}

	return &BeaconStateGloas{
		GenesisTime:                   fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot:         hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot:                          fmt.Sprintf("%d", st.Slot()),
		Fork:                          ForkFromConsensus(st.Fork()),
		LatestBlockHeader:             BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
		BlockRoots:                    br,
		StateRoots:                    sr,
		HistoricalRoots:               hr,
		Eth1Data:                      Eth1DataFromConsensus(st.Eth1Data()),
		Eth1DataVotes:                 votes,
		Eth1DepositIndex:              fmt.Sprintf("%d", st.Eth1DepositIndex()),
		Validators:                    vals,
		Balances:                      bals,
		RandaoMixes:                   rm,
		Slashings:                     slashings,
		PreviousEpochParticipation:    prevPart,
		CurrentEpochParticipation:     currPart,
		JustificationBits:             hexutil.Encode(st.JustificationBits()),
		PreviousJustifiedCheckpoint:   CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
		CurrentJustifiedCheckpoint:    CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
		FinalizedCheckpoint:           CheckpointFromConsensus(st.FinalizedCheckpoint()),
		InactivityScores:              is,
		CurrentSyncCommittee:          SyncCommitteeFromConsensus(currSc),
		NextSyncCommittee:             SyncCommitteeFromConsensus(nextSc),
		NextWithdrawalIndex:           fmt.Sprintf("%d", nwi),
		NextWithdrawalValidatorIndex:  fmt.Sprintf("%d", nwvi),
		HistoricalSummaries:           hs,
		DepositRequestsStartIndex:     fmt.Sprintf("%d", drsi),
		DepositBalanceToConsume:       fmt.Sprintf("%d", dbtc),
		ExitBalanceToConsume:          fmt.Sprintf("%d", ebtc),
		EarliestExitEpoch:             fmt.Sprintf("%d", eee),
		ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
		EarliestConsolidationEpoch:    fmt.Sprintf("%d", ece),
		PendingDeposits:               PendingDepositsFromConsensus(pbd),
		PendingPartialWithdrawals:     PendingPartialWithdrawalsFromConsensus(ppw),
		PendingConsolidations:         PendingConsolidationsFromConsensus(pc),
		ProposerLookahead:             lookahead,
		LatestExecutionPayloadBid:     ROExecutionPayloadBidFromConsensus(lepb),
		Builders:                      BuildersFromConsensus(builders),
		NextWithdrawalBuilderIndex:    fmt.Sprintf("%d", nwbi),
		ExecutionPayloadAvailability:  hexutil.Encode(epa),
		BuilderPendingPayments:        BuilderPendingPaymentsFromConsensus(bpp),
		BuilderPendingWithdrawals:     BuilderPendingWithdrawalsFromConsensus(bpw),
		LatestBlockHash:               hexutil.Encode(lbh[:]),
		PayloadExpectedWithdrawals:    WithdrawalsFromConsensus(pew),
	}, nil
}
@@ -1,11 +1,15 @@
 package structs

 import (
 	"bytes"
 	"testing"

 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/testing/assert"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
 	"github.com/OffchainLabs/prysm/v7/testing/util"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 )
@@ -355,3 +359,214 @@ func TestIndexedAttestation_ToConsensus(t *testing.T) {
 	_, err := a.ToConsensus()
 	require.ErrorContains(t, errNilValue.Error(), err)
 }

func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
	t.Run("empty blobkzg commitments", func(t *testing.T) {
		bid := &eth.ExecutionPayloadBid{
			ParentBlockHash:    bytes.Repeat([]byte{0x01}, 32),
			ParentBlockRoot:    bytes.Repeat([]byte{0x02}, 32),
			BlockHash:          bytes.Repeat([]byte{0x03}, 32),
			PrevRandao:         bytes.Repeat([]byte{0x04}, 32),
			FeeRecipient:       bytes.Repeat([]byte{0x05}, 20),
			GasLimit:           100,
			BuilderIndex:       7,
			Slot:               9,
			Value:              11,
			ExecutionPayment:   22,
			BlobKzgCommitments: [][]byte{},
		}
		roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
		require.NoError(t, err)

		got := ROExecutionPayloadBidFromConsensus(roBid)
		want := &ExecutionPayloadBid{
			ParentBlockHash:    hexutil.Encode(bid.ParentBlockHash),
			ParentBlockRoot:    hexutil.Encode(bid.ParentBlockRoot),
			BlockHash:          hexutil.Encode(bid.BlockHash),
			PrevRandao:         hexutil.Encode(bid.PrevRandao),
			FeeRecipient:       hexutil.Encode(bid.FeeRecipient),
			GasLimit:           "100",
			BuilderIndex:       "7",
			Slot:               "9",
			Value:              "11",
			ExecutionPayment:   "22",
			BlobKzgCommitments: []string{},
		}
		assert.DeepEqual(t, want, got)
	})

	t.Run("default", func(t *testing.T) {
		bid := &eth.ExecutionPayloadBid{
			ParentBlockHash:    bytes.Repeat([]byte{0x01}, 32),
			ParentBlockRoot:    bytes.Repeat([]byte{0x02}, 32),
			BlockHash:          bytes.Repeat([]byte{0x03}, 32),
			PrevRandao:         bytes.Repeat([]byte{0x04}, 32),
			FeeRecipient:       bytes.Repeat([]byte{0x05}, 20),
			GasLimit:           100,
			BuilderIndex:       7,
			Slot:               9,
			Value:              11,
			ExecutionPayment:   22,
			BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x06}, 48)},
		}
		roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
		require.NoError(t, err)

		var bkcs []string
		for _, commitment := range roBid.BlobKzgCommitments() {
			bkcs = append(bkcs, hexutil.Encode(commitment))
		}

		got := ROExecutionPayloadBidFromConsensus(roBid)
		want := &ExecutionPayloadBid{
			ParentBlockHash:    hexutil.Encode(bid.ParentBlockHash),
			ParentBlockRoot:    hexutil.Encode(bid.ParentBlockRoot),
			BlockHash:          hexutil.Encode(bid.BlockHash),
			PrevRandao:         hexutil.Encode(bid.PrevRandao),
			FeeRecipient:       hexutil.Encode(bid.FeeRecipient),
			GasLimit:           "100",
			BuilderIndex:       "7",
			Slot:               "9",
			Value:              "11",
			ExecutionPayment:   "22",
			BlobKzgCommitments: bkcs,
		}
		assert.DeepEqual(t, want, got)
	})
}

func TestBuilderConversionsFromConsensus(t *testing.T) {
	builder := &eth.Builder{
		Pubkey:            bytes.Repeat([]byte{0xAA}, 48),
		Version:           bytes.Repeat([]byte{0x01}, 4),
		ExecutionAddress:  bytes.Repeat([]byte{0xBB}, 20),
		Balance:           42,
		DepositEpoch:      3,
		WithdrawableEpoch: 4,
	}
	wantBuilder := &Builder{
		Pubkey:            hexutil.Encode(builder.Pubkey),
		Version:           hexutil.Encode(builder.Version),
		ExecutionAddress:  hexutil.Encode(builder.ExecutionAddress),
		Balance:           "42",
		DepositEpoch:      "3",
		WithdrawableEpoch: "4",
	}

	assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
	assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
}

func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
	withdrawal := &eth.BuilderPendingWithdrawal{
		FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
		Amount:       15,
		BuilderIndex: 2,
	}
	payment := &eth.BuilderPendingPayment{
		Weight:     5,
		Withdrawal: withdrawal,
	}
	wantWithdrawal := &BuilderPendingWithdrawal{
		FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
		Amount:       "15",
		BuilderIndex: "2",
	}
	wantPayment := &BuilderPendingPayment{
		Weight:     "5",
		Withdrawal: wantWithdrawal,
	}

	assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
	assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
	assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
	assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
}

func TestBeaconStateGloasFromConsensus(t *testing.T) {
	st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
		state.GenesisTime = 123
		state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
		state.Slot = 5
		state.ProposerLookahead = []uint64{1, 2}
		state.LatestExecutionPayloadBid = &eth.ExecutionPayloadBid{
			ParentBlockHash:    bytes.Repeat([]byte{0x11}, 32),
			ParentBlockRoot:    bytes.Repeat([]byte{0x12}, 32),
			BlockHash:          bytes.Repeat([]byte{0x13}, 32),
			PrevRandao:         bytes.Repeat([]byte{0x14}, 32),
			FeeRecipient:       bytes.Repeat([]byte{0x15}, 20),
			GasLimit:           64,
			BuilderIndex:       3,
			Slot:               5,
			Value:              99,
			ExecutionPayment:   7,
			BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x16}, 48)},
		}
		state.Builders = []*eth.Builder{
			{
				Pubkey:            bytes.Repeat([]byte{0x20}, 48),
				Version:           bytes.Repeat([]byte{0x21}, 4),
				ExecutionAddress:  bytes.Repeat([]byte{0x22}, 20),
				Balance:           88,
				DepositEpoch:      1,
				WithdrawableEpoch: 2,
			},
		}
		state.NextWithdrawalBuilderIndex = 9
		state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
		state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
			{
				Weight: 3,
				Withdrawal: &eth.BuilderPendingWithdrawal{
					FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
					Amount:       4,
					BuilderIndex: 5,
				},
			},
		}
		state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
			{
				FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
				Amount:       6,
				BuilderIndex: 7,
			},
		}
		state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
		state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
			{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
		}
		return nil
	})
	require.NoError(t, err)

	got, err := BeaconStateGloasFromConsensus(st)
	require.NoError(t, err)

	require.Equal(t, "123", got.GenesisTime)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
	require.Equal(t, "5", got.Slot)
	require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
	require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
	require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)

	require.NotNil(t, got.LatestExecutionPayloadBid)
	require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)

	require.NotNil(t, got.Builders)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
	require.Equal(t, "88", got.Builders[0].Balance)

	require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
	require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)

	require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)

	require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
	require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
	require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
	require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
}
@@ -285,6 +285,13 @@ type GetBlobsResponse struct {
 	Data []string `json:"data"` // blobs
 }

type GetExecutionPayloadEnvelopeResponse struct {
	Version             string                          `json:"version"`
	ExecutionOptimistic bool                            `json:"execution_optimistic"`
	Finalized           bool                            `json:"finalized"`
	Data                *SignedExecutionPayloadEnvelope `json:"data"`
}

type SSZQueryRequest struct {
	Query        string `json:"query"`
	IncludeProof bool   `json:"include_proof,omitempty"`
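To show the shape a client sees, here is a sketch that decodes a response of this form. The mirror types are trimmed to a few fields and the JSON values (version string, indices, signature) are illustrative placeholders, not output captured from Prysm:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirrors of the response structs above.
type envelope struct {
	BuilderIndex string `json:"builder_index"`
	Slot         string `json:"slot"`
	StateRoot    string `json:"state_root"`
}

type signedEnvelope struct {
	Message   *envelope `json:"message"`
	Signature string    `json:"signature"`
}

type response struct {
	Version             string          `json:"version"`
	ExecutionOptimistic bool            `json:"execution_optimistic"`
	Finalized           bool            `json:"finalized"`
	Data                *signedEnvelope `json:"data"`
}

func main() {
	raw := `{
	  "version": "gloas",
	  "execution_optimistic": false,
	  "finalized": true,
	  "data": {
	    "message": {"builder_index": "7", "slot": "9", "state_root": "0x00"},
	    "signature": "0xabcd"
	  }
	}`
	var r response
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Version, r.Data.Message.Slot) // gloas 9
}
```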
@@ -112,3 +112,8 @@ type LightClientOptimisticUpdateEvent struct {
 	Version string                       `json:"version"`
 	Data    *LightClientOptimisticUpdate `json:"data"`
 }

type PayloadEvent struct {
	Slot      string `json:"slot"`
	BlockRoot string `json:"block_root"`
}
@@ -262,3 +262,23 @@ type PendingConsolidation struct {
 	SourceIndex string `json:"source_index"`
 	TargetIndex string `json:"target_index"`
 }

type Builder struct {
	Pubkey            string `json:"pubkey"`
	Version           string `json:"version"`
	ExecutionAddress  string `json:"execution_address"`
	Balance           string `json:"balance"`
	DepositEpoch      string `json:"deposit_epoch"`
	WithdrawableEpoch string `json:"withdrawable_epoch"`
}

type BuilderPendingPayment struct {
	Weight     string                    `json:"weight"`
	Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
}

type BuilderPendingWithdrawal struct {
	FeeRecipient string `json:"fee_recipient"`
	Amount       string `json:"amount"`
	BuilderIndex string `json:"builder_index"`
}
@@ -221,3 +221,51 @@ type BeaconStateFulu struct {
 	PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
 	ProposerLookahead     []string                `json:"proposer_lookahead"`
 }

type BeaconStateGloas struct {
	GenesisTime                   string                      `json:"genesis_time"`
	GenesisValidatorsRoot         string                      `json:"genesis_validators_root"`
	Slot                          string                      `json:"slot"`
	Fork                          *Fork                       `json:"fork"`
	LatestBlockHeader             *BeaconBlockHeader          `json:"latest_block_header"`
	BlockRoots                    []string                    `json:"block_roots"`
	StateRoots                    []string                    `json:"state_roots"`
	HistoricalRoots               []string                    `json:"historical_roots"`
	Eth1Data                      *Eth1Data                   `json:"eth1_data"`
	Eth1DataVotes                 []*Eth1Data                 `json:"eth1_data_votes"`
	Eth1DepositIndex              string                      `json:"eth1_deposit_index"`
	Validators                    []*Validator                `json:"validators"`
	Balances                      []string                    `json:"balances"`
	RandaoMixes                   []string                    `json:"randao_mixes"`
	Slashings                     []string                    `json:"slashings"`
	PreviousEpochParticipation    []string                    `json:"previous_epoch_participation"`
	CurrentEpochParticipation     []string                    `json:"current_epoch_participation"`
	JustificationBits             string                      `json:"justification_bits"`
	PreviousJustifiedCheckpoint   *Checkpoint                 `json:"previous_justified_checkpoint"`
	CurrentJustifiedCheckpoint    *Checkpoint                 `json:"current_justified_checkpoint"`
	FinalizedCheckpoint           *Checkpoint                 `json:"finalized_checkpoint"`
	InactivityScores              []string                    `json:"inactivity_scores"`
	CurrentSyncCommittee          *SyncCommittee              `json:"current_sync_committee"`
	NextSyncCommittee             *SyncCommittee              `json:"next_sync_committee"`
	NextWithdrawalIndex           string                      `json:"next_withdrawal_index"`
	NextWithdrawalValidatorIndex  string                      `json:"next_withdrawal_validator_index"`
	HistoricalSummaries           []*HistoricalSummary        `json:"historical_summaries"`
	DepositRequestsStartIndex     string                      `json:"deposit_requests_start_index"`
	DepositBalanceToConsume       string                      `json:"deposit_balance_to_consume"`
	ExitBalanceToConsume          string                      `json:"exit_balance_to_consume"`
	EarliestExitEpoch             string                      `json:"earliest_exit_epoch"`
	ConsolidationBalanceToConsume string                      `json:"consolidation_balance_to_consume"`
	EarliestConsolidationEpoch    string                      `json:"earliest_consolidation_epoch"`
	PendingDeposits               []*PendingDeposit           `json:"pending_deposits"`
	PendingPartialWithdrawals     []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
	PendingConsolidations         []*PendingConsolidation     `json:"pending_consolidations"`
	ProposerLookahead             []string                    `json:"proposer_lookahead"`
	LatestExecutionPayloadBid     *ExecutionPayloadBid        `json:"latest_execution_payload_bid"`
	Builders                      []*Builder                  `json:"builders"`
	NextWithdrawalBuilderIndex    string                      `json:"next_withdrawal_builder_index"`
	ExecutionPayloadAvailability  string                      `json:"execution_payload_availability"`
	BuilderPendingPayments        []*BuilderPendingPayment    `json:"builder_pending_payments"`
	BuilderPendingWithdrawals     []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
	LatestBlockHash               string                      `json:"latest_block_hash"`
	PayloadExpectedWithdrawals    []*Withdrawal               `json:"payload_expected_withdrawals"`
}
@@ -136,6 +136,7 @@ go_test(
         "process_block_test.go",
         "receive_attestation_test.go",
         "receive_block_test.go",
+        "receive_payload_attestation_message_test.go",
         "service_norace_test.go",
         "service_test.go",
         "setup_forkchoice_test.go",
@@ -153,6 +154,7 @@ go_test(
         "//beacon-chain/core/altair:go_default_library",
         "//beacon-chain/core/blocks:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
+        "//beacon-chain/core/gloas:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
@@ -18,6 +18,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 )
@@ -114,6 +115,7 @@ type FinalizationFetcher interface {
 	FinalizedBlockHash() [32]byte
 	InForkchoice([32]byte) bool
 	IsFinalized(ctx context.Context, blockRoot [32]byte) bool
+	ParentPayloadReady(interfaces.ReadOnlyBeaconBlock) bool
 }

 // OptimisticModeFetcher retrieves information about optimistic status of the node.
@@ -403,6 +405,32 @@ func (s *Service) InForkchoice(root [32]byte) bool {
 	return s.cfg.ForkChoiceStore.HasNode(root)
 }

// ParentPayloadReady returns true if the block's parent payload is available
// in forkchoice. For pre-Gloas blocks or blocks building on empty, this always
// returns true. For blocks building on full, it checks that the full node
// exists.
func (s *Service) ParentPayloadReady(blk interfaces.ReadOnlyBeaconBlock) bool {
	if blk.Version() < version.Gloas {
		return true
	}
	parentRoot := blk.ParentRoot()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
	if err != nil {
		return false
	}
	bid, err := blk.Body().SignedExecutionPayloadBid()
	if err != nil || bid == nil || bid.Message == nil {
		return false
	}
	parentBlockHash := [32]byte(bid.Message.ParentBlockHash)
	if parentBlockHash != blockHash {
		return true // builds on empty, no full node needed
	}
	return s.cfg.ForkChoiceStore.HasFullNode(parentRoot)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
return bytesutil.SafeCopyBytes(header.BlockHash()), nil
|
||||
}
|
||||
|
||||
// CanonicalNodeAtSlot wraps the corresponding method in forkchoice
|
||||
func (s *Service) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.CanonicalNodeAtSlot(slot)
|
||||
}
|
||||
|
||||
// DependentRoot wraps the corresponding method in forkchoice
|
||||
func (s *Service) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
|
||||
@@ -620,6 +620,121 @@ func TestService_IsFinalized(t *testing.T) {
 	require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
 }

func TestParentPayloadReady(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 0
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, tr := minimalTestService(t)
	ctx := t.Context()
	fcs := tr.fcs

	parentRoot := [32]byte{1}
	parentBlockHash := [32]byte{10}
	zeroHash := params.BeaconConfig().ZeroHash

	// Insert parent node into forkchoice.
	st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, zeroHash, parentBlockHash, zeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, st, parentROBlock))

	t.Run("pre-Gloas always true", func(t *testing.T) {
		blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{
			Block: &ethpb.BeaconBlockDeneb{ParentRoot: parentRoot[:]},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("parent not in forkchoice", func(t *testing.T) {
		unknownParent := [32]byte{99}
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: unknownParent[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on empty", func(t *testing.T) {
		differentHash := [32]byte{99}
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: differentHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on full without payload", func(t *testing.T) {
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on full with payload", func(t *testing.T) {
		pe, err := blocks.WrappedROExecutionPayloadEnvelope(&ethpb.ExecutionPayloadEnvelope{
			BeaconBlockRoot: parentRoot[:],
			Payload:         &enginev1.ExecutionPayloadDeneb{},
		})
		require.NoError(t, err)
		require.NoError(t, fcs.InsertPayload(pe))

		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})
}

func Test_hashForGenesisRoot(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
@@ -1,8 +1,10 @@
 package blockchain

 import (
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 )
@@ -16,6 +18,14 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
 	if b.Version() < version.Gloas {
 		return parentRoot, nil
 	}
 	parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
 	if err != nil {
 		return [32]byte{}, errors.Wrap(err, "failed to get slot for parent root")
 	}

 	if slots.ToEpoch(parentSlot) < params.BeaconConfig().GloasForkEpoch {
 		return parentRoot, nil
 	}
 	blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
 	if err != nil {
 		return [32]byte{}, errors.Wrap(err, "failed to get block hash for parent root")
@@ -3,12 +3,14 @@ package blockchain
 import (
 	"context"
 	"testing"
 	"time"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
 	mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
@@ -19,6 +21,8 @@ import (
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
 	"github.com/OffchainLabs/prysm/v7/testing/util"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )

 func prepareGloasForkchoiceState(
@@ -462,6 +466,12 @@ func TestGetLookupParentRoot_PreGloas(t *testing.T) {
 }

 func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	cfg := params.BeaconConfig().Copy()
 	cfg.GloasForkEpoch = 0
 	cfg.InitializeForkSchedule()
 	params.OverrideBeaconConfig(cfg)

 	service, req := minimalTestService(t)
 	ctx := t.Context()
@@ -503,6 +513,12 @@ func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
 }

 func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	cfg := params.BeaconConfig().Copy()
 	cfg.GloasForkEpoch = 0
 	cfg.InitializeForkSchedule()
 	params.OverrideBeaconConfig(cfg)

 	service, req := minimalTestService(t)
 	ctx := t.Context()
@@ -542,3 +558,91 @@ func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
 	// parentBlockHash == parentNodeBlockHash, so it builds on full => returns parentBlockHash
 	require.Equal(t, parentNodeBlockHash, got)
 }

func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 2
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, req := minimalTestService(t)
	ctx := t.Context()

	parentRoot := [32]byte{1}
	parentNodeBlockHash := [32]byte{10}
	parentSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
	require.NoError(t, err)
	parentSlot = parentSlot - 1

	st, parentROBlock, err := prepareGloasForkchoiceState(
		ctx,
		parentSlot,
		parentRoot,
		params.BeaconConfig().ZeroHash,
		parentNodeBlockHash,
		params.BeaconConfig().ZeroHash,
		0,
		0,
	)
	require.NoError(t, err)
	require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))

	blockHash := [32]byte{20}
	bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
		Message: &ethpb.ExecutionPayloadBid{
			BlockHash:       blockHash[:],
			ParentBlockHash: parentNodeBlockHash[:],
		},
	})

	blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
		Block: &ethpb.BeaconBlockGloas{
			Slot:       parentSlot + 1,
			ParentRoot: parentRoot[:],
			Body: &ethpb.BeaconBlockBodyGloas{
				SignedExecutionPayloadBid: bid,
			},
		},
	})
	wsb, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)
	roblock, err := blocks.NewROBlock(wsb)
	require.NoError(t, err)

	got, err := service.getLookupParentRoot(roblock)
	require.NoError(t, err)
	// Parent slot is pre-fork, so always return parentRoot.
	require.Equal(t, parentRoot, got)
}

func TestLateBlockTasks_GloasFCU(t *testing.T) {
	logHook := logTest.NewGlobal()
	resetCfg := features.InitWithReset(&features.Flags{
		PrepareAllPayloads: true,
	})
	defer resetCfg()

	pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
	service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})

	blockHash := bytesutil.ToBytes32([]byte("hash1"))
	base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
	base.LatestBlockHash = blockHash[:]
	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	headRoot := bytesutil.ToBytes32([]byte("headroot"))
	service.head = &head{
		root:  headRoot,
		state: st,
		slot:  1,
	}

	// Set genesis time so CurrentSlot > HeadSlot, triggering late block logic.
	service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
	service.SetForkChoiceGenesisTime(service.genesisTime)

	service.lateBlockTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
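The GetLookupParentRoot tests above pin down the lookup rule: pre-Gloas and pre-fork parents are keyed by beacon block root, a Gloas parent built on empty is also keyed by root, and one built on full is keyed by its execution block hash. A distilled sketch of that rule, inferred from the test expectations rather than the full function body (which is truncated above):

```go
package main

import "fmt"

// lookupParentKey restates getLookupParentRoot as exercised by the tests:
// the forkchoice lookup key is the parent's beacon block root, except when
// a post-fork parent was built on a full payload, in which case the
// parent's execution block hash is used instead.
func lookupParentKey(parentPreFork bool, parentRoot, parentNodeBlockHash, bidParentBlockHash [32]byte) [32]byte {
	if parentPreFork {
		return parentRoot
	}
	if bidParentBlockHash == parentNodeBlockHash {
		return parentNodeBlockHash // builds on full
	}
	return parentRoot // builds on empty
}

func main() {
	root, hash := [32]byte{1}, [32]byte{10}
	fmt.Println(lookupParentKey(true, root, hash, hash) == root)            // pre-fork parent
	fmt.Println(lookupParentKey(false, root, hash, hash) == hash)           // builds on full
	fmt.Println(lookupParentKey(false, root, hash, [32]byte{9}) == root)    // builds on empty
}
```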
@@ -132,6 +132,15 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
 	}
 	if block.Version() < version.Gloas {
 		moreFields["dataAvailabilityWaitedTime"] = daWaitedTime
 	} else {
 		signedBid, err := block.Body().SignedExecutionPayloadBid()
 		if err != nil {
 			log.WithError(err).Error("Failed to get signed execution payload bid for logging")
 		} else {
 			moreFields["blockHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.BlockHash))
 			moreFields["parentHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.ParentBlockHash))
 			moreFields["builderIndex"] = signedBid.Message.BuilderIndex
 		}
 	}

 	level := logs.PackageVerbosity("beacon-chain/blockchain")
@@ -1,12 +1,14 @@
 package blockchain

 import (
 	"bytes"
 	"context"
 	"fmt"
 	"time"

 	"github.com/OffchainLabs/go-bitfield"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -82,6 +84,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
 		return errors.Wrap(err, "could not handle block's attestations")
 	}
 	if err := s.handleBlockPayloadAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
 		return errors.Wrap(err, "could not handle block's payload attestations")
 	}

 	s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
 	if cfg.isValidPayload {
@@ -103,6 +108,11 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	}
 	if cfg.roblock.Version() < version.Gloas {
 		s.sendFCU(cfg)
 	} else if s.isNewHead(cfg.headRoot) {
 		if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
 			log.WithError(err).Error("Could not save head")
 		}
 		s.pruneAttsFromPool(ctx, cfg.postState, cfg.roblock)
 	}

 	// Pre-Fulu the caches are updated when computing the payload attributes
@@ -306,7 +316,18 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
 	slot := block.Slot()

 	if blockVersion >= version.Fulu {
-		if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
+		body := block.Body()
+		if body == nil {
+			return errors.New("invalid nil beacon block body")
+		}
+		kzgCommitments, err := body.BlobKzgCommitments()
+		if err != nil {
+			return errors.Wrap(err, "blob KZG commitments")
+		}
+		if len(kzgCommitments) == 0 {
+			return nil
+		}
+		if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), slot); err != nil {
 			return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
 		}
@@ -368,6 +389,46 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	return nil
 }

// refreshCaches updates the next slot state cache and epoch boundary caches.
// Before Fulu this is done synchronously, after Fulu it is deferred to a goroutine.
func (s *Service) refreshCaches(ctx context.Context, currentSlot primitives.Slot, headRoot [32]byte, headState state.BeaconState, accessRoot [32]byte) {
	lastRoot, lastState := transition.LastCachedState()
	if lastState == nil {
		lastRoot, lastState = headRoot[:], headState
	}
	if lastState.Version() < version.Fulu {
		s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
	} else {
		go func() {
			ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
			defer cancel()
			s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
		}()
	}
}

// updateCachesAndEpochBoundary updates the next slot state cache and handles
// epoch boundary processing. If the lastRoot matches accessRoot, the cached
// last state is reused; otherwise, the head state is advanced instead.
func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot primitives.Slot, headState state.BeaconState, accessRoot [32]byte, lastRoot []byte, lastState state.BeaconState) {
	if bytes.Equal(lastRoot, accessRoot[:]) {
		// Happy case, the last advanced state is head, we thus keep it
		lastState.CopyAllTries()
		if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
			log.WithError(err).Debug("Could not update next slot state cache")
		}
	} else {
		// Last advanced state was not head, we do not advance this but rather use headstate
		headState.CopyAllTries()
		if err := transition.UpdateNextSlotCache(ctx, accessRoot[:], headState); err != nil {
			log.WithError(err).Debug("Could not update next slot state cache")
		}
	}
	if err := s.handleEpochBoundary(ctx, currentSlot, headState, accessRoot[:]); err != nil {
		log.WithError(err).Error("Could not update epoch boundary caches")
	}
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
@@ -419,6 +480,36 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
 	return nil
 }

// handleBlockPayloadAttestations feeds payload attestations included in a Gloas block into forkchoice.
func (s *Service) handleBlockPayloadAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
	if blk.Version() < version.Gloas {
		return nil
	}
	atts, err := blk.Body().PayloadAttestations()
	if err != nil {
		return err
	}
	if len(atts) == 0 {
		return nil
	}
	committee, err := gloas.PayloadCommittee(ctx, st, blk.Slot()-1)
	if err != nil {
		return err
	}
	for _, att := range atts {
		root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
		if !s.cfg.ForkChoiceStore.HasNode(root) {
			continue
		}
		for i := range committee {
			if att.AggregationBits.BitAt(uint64(i)) {
				s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(i), att.Data.PayloadPresent, att.Data.BlobDataAvailable)
			}
		}
	}
	return nil
}
|
||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||
// This function requires a write lock on forkchoice.
|
||||
@@ -592,11 +683,22 @@ func (s *Service) runLateBlockTasks() {
|
||||
return
|
||||
}
|
||||
|
||||
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
|
||||
cfg := params.BeaconConfig()
|
||||
attDueBPS := cfg.AttestationDueBPS
|
||||
if slots.ToEpoch(s.CurrentSlot()) >= cfg.GloasForkEpoch {
|
||||
attDueBPS = cfg.AttestationDueBPSGloas
|
||||
}
|
||||
attThreshold := cfg.SlotComponentDuration(attDueBPS)
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C():
|
||||
case slot := <-ticker.C():
|
||||
if attDueBPS != cfg.AttestationDueBPSGloas && slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
|
||||
ticker.Done()
|
||||
attDueBPS = cfg.AttestationDueBPSGloas
|
||||
attThreshold = cfg.SlotComponentDuration(attDueBPS)
|
||||
ticker = slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
|
||||
}
|
||||
s.lateBlockTasks(s.ctx)
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
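The attestation-due threshold moves here from a hard-coded third of a slot to a basis-points fraction of the slot (AttestationDueBPS, with a Gloas-specific value after the fork). Assuming SlotComponentDuration scales the slot length by bps/10000, which is an assumption since its body is not part of this diff, the arithmetic looks like:

```go
package main

import (
	"fmt"
	"time"
)

// slotComponentDuration is an assumed model of cfg.SlotComponentDuration:
// a slot component expressed in basis points (1 bps = 1/10000) of the full
// slot length. The real Prysm implementation may differ in rounding.
func slotComponentDuration(secondsPerSlot, bps uint64) time.Duration {
	slot := time.Duration(secondsPerSlot) * time.Second
	return slot * time.Duration(bps) / 10000
}

func main() {
	// Hypothetical values: with a 12s slot, a deadline at one third of the
	// slot corresponds to 3333 bps, just under 4s.
	fmt.Println(slotComponentDuration(12, 3333)) // 3.9996s
	// A deadline at half the slot would be 5000 bps, i.e. 6s.
	fmt.Println(slotComponentDuration(12, 5000)) // 6s
}
```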
@@ -673,7 +775,18 @@ func (s *Service) isDataAvailable(
|
||||
root := roBlock.Root()
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.areDataColumnsAvailable(ctx, root, block.Slot())
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
@@ -688,30 +801,15 @@ func (s *Service) isDataAvailable(
|
||||
func (s *Service) areDataColumnsAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
block interfaces.ReadOnlyBeaconBlock,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
currentSlot := s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(slot), slots.ToEpoch(currentSlot)
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If block has not commitments there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
@@ -765,7 +863,7 @@ func (s *Service) areDataColumnsAvailable(
}

// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
nextSlot, err := slots.StartTime(s.genesisTime, slot+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}
@@ -780,7 +878,7 @@ func (s *Service) areDataColumnsAvailable(
}

log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
@@ -826,7 +924,7 @@ func (s *Service) areDataColumnsAvailable(
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", slot, root, missingIndices)
}
}
}
||||
@@ -924,37 +1022,18 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
var accessRoot [32]byte
isFull, err := headState.IsParentBlockFull()
if err != nil || !isFull {
accessRoot = headRoot
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
accessRoot, err = headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash, using head root as access root")
accessRoot = headRoot
}
}
s.refreshCaches(ctx, currentSlot, headRoot, headState, accessRoot)
// return early if we already started building a block for the current
// head root
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
@@ -968,26 +1047,38 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
return
}

s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
if headState.Version() >= version.Gloas {
bh, err := headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
return
}
_, err = s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()

fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
}

@@ -64,20 +64,26 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig) (*fcuConfig,
// block is not the head of the chain. It requires that the caller holds a lock on
// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
receivedWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
headWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
fields := logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
"receivedWeight": receivedWeight,
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
}
headEmpty, headFull, err := s.cfg.ForkChoiceStore.PayloadWeights(headRoot)
if err == nil {
fields["headEmptyWeight"] = headEmpty
fields["headFullWeight"] = headFull
}
log.WithFields(fields).Debug("Head block is not the received block")
}

// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
@@ -193,10 +199,32 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// GetPrestateToPropose returns the pre-state for a proposer to base its block on.
// It is similar to GetBlockPreState but lacks unnecessary verifications.
func (s *Service) GetPrestateToPropose(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.GetPreStateToPropose")
defer span.End()

accessRoot, err := s.getLookupParentRoot(b)
if err != nil {
return nil, errors.Wrap(err, "could not get lookup parent root")
}

bl := b.Block()
preState, err := s.cfg.StateGen.StateByRoot(ctx, accessRoot)
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", bl.Slot())
}
if preState == nil || preState.IsNil() {
return nil, errors.Errorf("nil pre state for slot %d", bl.Slot())
}
return preState, nil
}

// GetBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
func (s *Service) GetBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()

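GetPrestateToPropose skips the slot-window and validity verification that GetBlockPreState performs, which suits the proposer path where the block being built is trusted. A hedged in-context usage sketch (error handling trimmed, service wiring assumed):

```go
// Inside a proposer flow that already holds a ROBlock for the block
// being built; roblock and ctx come from the surrounding code.
preState, err := service.GetPrestateToPropose(ctx, roblock)
if err != nil {
	return errors.Wrap(err, "could not get proposal pre-state")
}
// preState is the state at the block's lookup parent root, fetched
// without the time-window checks applied to incoming blocks.
```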
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
@@ -733,7 +734,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -785,7 +786,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -849,7 +850,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1324,7 +1325,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
go func() {
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
@@ -1338,7 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
go func() {
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
@@ -1352,7 +1353,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
go func() {
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
@@ -1366,7 +1367,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
go func() {
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
@@ -1444,7 +1445,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1466,7 +1467,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1489,7 +1490,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1514,7 +1515,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1551,7 +1552,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {

rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, rowsb)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
@@ -1580,7 +1581,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, roblock)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1649,7 +1650,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1672,7 +1673,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1694,7 +1695,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1725,7 +1726,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1755,7 +1756,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {

rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, rowsb)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
@@ -1795,7 +1796,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, roblock)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1822,7 +1823,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1852,7 +1853,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {

roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, roblock)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1914,7 +1915,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1936,7 +1937,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1958,7 +1959,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -1991,7 +1992,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2023,7 +2024,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, rowsb)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
@@ -2115,7 +2116,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2183,7 +2184,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2456,7 +2457,7 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2516,7 +2517,7 @@ func TestRollbackBlock_SavePostStateInfo_ContextDeadline(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2574,7 +2575,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2591,7 +2592,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, err)
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, roblock)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -3480,3 +3481,219 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
}
}
}

func TestHandleBlockPayloadAttestations(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)

t.Run("pre-Gloas block is no-op", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
blk := util.NewBeaconBlockElectra()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
st, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
})

t.Run("empty payload attestations", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
blk := util.NewBeaconBlockGloas()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
st, err := util.NewBeaconStateGloas()
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
})

t.Run("unknown root is skipped", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()

numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)

unknownRoot := bytesutil.ToBytes32([]byte("unknown"))
bits := bitfield.NewBitvector512()
bits.SetBitAt(0, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: unknownRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})

t.Run("known root sets PTC votes", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()

blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))

numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)

base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)

ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

bits := bitfield.NewBitvector512()
bits.SetBitAt(0, true)
bits.SetBitAt(2, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})

t.Run("multiple attestations", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()

blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))

numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)

base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)

bits1 := bitfield.NewBitvector512()
bits1.SetBitAt(0, true)
bits2 := bitfield.NewBitvector512()
bits2.SetBitAt(1, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits1,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: make([]byte, 96),
},
{
AggregationBits: bits2,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: false,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})
}

func TestUpdateCachesAndEpochBoundary_MatchingRoots(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}

service.updateCachesAndEpochBoundary(t.Context(), 1, st, accessRoot, accessRoot[:], st)

cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}

func TestUpdateCachesAndEpochBoundary_DifferentRoots(t *testing.T) {
service := testServiceNoDB(t)
headState, _ := util.DeterministicGenesisState(t, 1)
lastState, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
lastRoot := [32]byte{'b'}

service.updateCachesAndEpochBoundary(t.Context(), 1, headState, accessRoot, lastRoot[:], lastState)

// Cache should be keyed by accessRoot, not lastRoot.
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())

cached = transition.NextSlotState(lastRoot[:], 1)
require.Equal(t, true, cached == nil)
}

func TestRefreshCaches_NoCachedState(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
headRoot := [32]byte{'h'}

service.refreshCaches(t.Context(), 1, headRoot, st, headRoot)

cached := transition.NextSlotState(headRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}

func TestRefreshCaches_CachedStateMatchesAccessRoot(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
headRoot := [32]byte{'h'}

// Pre-populate the cache with accessRoot.
require.NoError(t, transition.UpdateNextSlotCache(t.Context(), accessRoot[:], st))

service.refreshCaches(t.Context(), 1, headRoot, st, accessRoot)

cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}

@@ -112,7 +112,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -174,7 +174,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {

roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)

@@ -45,6 +45,8 @@ type BlockReceiver interface {
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
GetBlockPreState(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
GetPrestateToPropose(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
}

// BlobReceiver interface defines the methods of chain service for receiving new
@@ -100,7 +102,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return errors.Wrap(err, "new ro block with root")
}

preState, err := s.getBlockPreState(ctx, roblock)
preState, err := s.GetBlockPreState(ctx, roblock)
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}

@@ -5,6 +5,8 @@ import (
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
@@ -68,6 +70,16 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
return err
}

// DA check: verify data columns are available before inserting payload.
bid, err := preState.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if len(bid.BlobKzgCommitments()) > 0 {
if err := s.areDataColumnsAvailable(ctx, root, envelope.Slot()); err != nil {
return errors.Wrap(err, "data availability check failed for payload envelope")
}
}
if err := s.savePostPayload(ctx, signed, preState); err != nil {
return err
}
@@ -92,9 +104,25 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
return err
}

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.PayloadProcessed,
Data: &statefeed.PayloadProcessedData{
Slot: envelope.Slot(),
BlockRoot: root,
},
})

execution, err := envelope.Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload from envelope for logging")
return nil
}

log.WithFields(logrus.Fields{
"slot": envelope.Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"slot": envelope.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.ParentHash())),
}).Info("Processed execution payload envelope")
return nil
}
@@ -113,9 +141,16 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces
s.head.state = st
s.headLock.Unlock()

if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
log.WithError(err).Error("Could not update next slot cache")
}
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
log.WithError(err).Error("Could not update next slot cache")
}
if err := s.handleEpochBoundary(ctx, envelope.Slot(), st, blockHash[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}()

attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
if s.inRegularSync() {

@@ -2,6 +2,11 @@ package blockchain

import (
"context"
"slices"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/pkg/errors"

ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -12,8 +17,28 @@ type PayloadAttestationReceiver interface {
ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
}

// ReceivePayloadAttestationMessage accepts a payload attestation message.
// ReceivePayloadAttestationMessage accepts a payload attestation message and updates the
// forkchoice PTC vote bitvectors for the referenced beacon block.
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
// TODO: Handle payload attestation message processing once Gloas is fully wired.
if a == nil || a.Data == nil {
return errors.New("nil payload attestation message")
}
root := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)

st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return err
}
ptc, err := gloas.PayloadCommittee(ctx, st, a.Data.Slot)
if err != nil {
return err
}
idx := slices.Index(ptc, a.ValidatorIndex)
if idx == -1 {
return errors.New("validator not in PTC")
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(idx), a.Data.PayloadPresent, a.Data.BlobDataAvailable)
return nil
}

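A hedged caller sketch for the receiver above, e.g. from a gossip handler; message decoding and signature verification are assumed to happen before this point, and validatorIdx/root/slot are stand-ins for values taken from the decoded message:

```go
msg := &ethpb.PayloadAttestationMessage{
	ValidatorIndex: validatorIdx,
	Data: &ethpb.PayloadAttestationData{
		BeaconBlockRoot:   root[:],
		Slot:              slot,
		PayloadPresent:    true,
		BlobDataAvailable: true,
	},
}
// On success the PTC vote bitvectors for root are updated in forkchoice.
if err := chainService.ReceivePayloadAttestationMessage(ctx, msg); err != nil {
	log.WithError(err).Debug("Could not process payload attestation message")
}
```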
@@ -0,0 +1,143 @@
package blockchain

import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestReceivePayloadAttestationMessage_NilMessage(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
err := s.ReceivePayloadAttestationMessage(t.Context(), nil)
require.ErrorContains(t, "nil payload attestation message", err)
}

func TestReceivePayloadAttestationMessage_NilData(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
msg := &ethpb.PayloadAttestationMessage{}
err := s.ReceivePayloadAttestationMessage(t.Context(), msg)
require.ErrorContains(t, "nil payload attestation message", err)
}

func TestReceivePayloadAttestationMessage_ValidatorNotInPTC(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)

s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()

blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))

numVals := 2048
headState := gloasStateWithValidators(t, 1, numVals)

base, blk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, blk, blockRoot)

wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}

ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)

// Pick a validator index not in the PTC.
inPTC := make(map[primitives.ValidatorIndex]bool)
for _, idx := range ptc {
inPTC[idx] = true
}
var notInPTC primitives.ValidatorIndex
for i := primitives.ValidatorIndex(0); int(i) < numVals; i++ {
if !inPTC[i] {
notInPTC = i
break
}
}

msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: notInPTC,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
},
}
err = s.ReceivePayloadAttestationMessage(ctx, msg)
require.ErrorContains(t, "validator not in PTC", err)
}

func TestReceivePayloadAttestationMessage_OK(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)

s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()

blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))

headState := gloasStateWithValidators(t, 1, 2048)

base, blk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, blk, blockRoot)

wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}

ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: ptc[0],
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
}
require.NoError(t, s.ReceivePayloadAttestationMessage(ctx, msg))
}

// gloasStateWithValidators returns a Gloas beacon state with active validators
// for PTC committee computation.
func gloasStateWithValidators(t *testing.T, slot primitives.Slot, numVals int) state.BeaconState {
t.Helper()
validators := make([]*ethpb.Validator, numVals)
balances := make([]uint64, numVals)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalanceElectra
}
st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
s.Slot = slot
s.Validators = validators
s.Balances = balances
return nil
})
require.NoError(t, err)
return st
}
@@ -106,7 +106,7 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, roblock)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)

@@ -77,6 +77,8 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
ParentPayloadReadyVal *bool
ForkchoiceRoots map[[32]byte]bool
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -334,6 +336,16 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
return nil
}

// GetBlockPreState mocks the same method in the chain service.
func (s *ChainService) GetBlockPreState(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
return s.State, nil
}

// GetPrestateToPropose mocks the same method in the chain service.
func (s *ChainService) GetPrestateToPropose(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
return s.State.Copy(), nil
}

// HeadSlot mocks HeadSlot method in chain service.
func (s *ChainService) HeadSlot() primitives.Slot {
if s.MockHeadSlot != nil {
@@ -569,7 +581,10 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
}

// InForkchoice mocks the same method in the chain service
func (s *ChainService) InForkchoice(_ [32]byte) bool {
func (s *ChainService) InForkchoice(root [32]byte) bool {
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return !s.NotFinalized
}

@@ -775,6 +790,14 @@ func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ inte
return nil
}

// ParentPayloadReady mocks the same method in the chain service.
func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool {
if s.ParentPayloadReadyVal != nil {
return *s.ParentPayloadReadyVal
}
return true
}

// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

2
beacon-chain/cache/BUILD.bazel
vendored
@@ -18,7 +18,6 @@ go_library(
"interfaces.go",
"log.go",
"payload_attestation.go",
"payload_committee.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
@@ -79,7 +78,6 @@ go_test(
"committee_fuzz_test.go",
"committee_test.go",
"payload_attestation_test.go",
"payload_committee_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",

127
beacon-chain/cache/payload_committee.go
vendored
@@ -1,127 +0,0 @@
//go:build !fuzz

package cache

import (
"context"
"sync"
"time"

lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

const (
// maxPayloadCommitteeCacheSize is the max number of payload committee entries to cache.
// 64 covers two full epochs of slots.
maxPayloadCommitteeCacheSize = 64
)

var (
// PayloadCommitteeCacheMiss tracks the number of payload committee requests that aren't present in the cache.
PayloadCommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_committee_cache_miss",
Help: "The number of payload committee requests that aren't present in the cache.",
})
// PayloadCommitteeCacheHit tracks the number of payload committee requests that are in the cache.
PayloadCommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_committee_cache_hit",
Help: "The number of payload committee requests that are present in the cache.",
})
)

// PayloadCommitteeCache is an LRU cache for payload timeliness committee results keyed by ptcSeed.
type PayloadCommitteeCache struct {
cache *lru.Cache
lock sync.RWMutex
inProgress map[string]bool
}

// NewPayloadCommitteeCache creates a new cache for storing payload committee results.
func NewPayloadCommitteeCache() *PayloadCommitteeCache {
c := &PayloadCommitteeCache{}
c.Clear()
return c
}

// Get returns the cached payload committee for the given seed. Returns nil on cache miss.
// Blocks if another goroutine is computing the same seed.
func (c *PayloadCommitteeCache) Get(ctx context.Context, seed [32]byte) ([]primitives.ValidatorIndex, error) {
if err := c.checkInProgress(ctx, seed); err != nil {
return nil, err
}

obj, exists := c.cache.Get(key(seed))
if exists {
PayloadCommitteeCacheHit.Inc()
} else {
PayloadCommitteeCacheMiss.Inc()
return nil, nil
}

indices, ok := obj.([]primitives.ValidatorIndex)
if !ok {
return nil, ErrIncorrectType
}

return indices, nil
}

// Add stores a payload committee result in the cache.
func (c *PayloadCommitteeCache) Add(seed [32]byte, indices []primitives.ValidatorIndex) {
c.cache.Add(key(seed), indices)
}

// MarkInProgress marks a seed as being computed. Returns ErrAlreadyInProgress if another
// goroutine is already computing it.
func (c *PayloadCommitteeCache) MarkInProgress(seed [32]byte) error {
c.lock.Lock()
defer c.lock.Unlock()
s := key(seed)
if c.inProgress[s] {
return ErrAlreadyInProgress
}
c.inProgress[s] = true
return nil
}

// MarkNotInProgress releases the in-progress lock on a given seed.
func (c *PayloadCommitteeCache) MarkNotInProgress(seed [32]byte) error {
c.lock.Lock()
defer c.lock.Unlock()
s := key(seed)
delete(c.inProgress, s)
return nil
}

// Clear resets the cache to its initial state.
func (c *PayloadCommitteeCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.cache = lruwrpr.New(maxPayloadCommitteeCacheSize)
c.inProgress = make(map[string]bool)
}

func (c *PayloadCommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) error {
delay := minDelay
for {
if ctx.Err() != nil {
return ctx.Err()
}

c.lock.RLock()
if !c.inProgress[key(seed)] {
c.lock.RUnlock()
break
}
c.lock.RUnlock()

time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = min(delay, maxDelay)
}
return nil
}
39
beacon-chain/cache/payload_committee_disabled.go
vendored
@@ -1,39 +0,0 @@
//go:build fuzz

// This file is used in fuzzer builds to bypass the payload committee cache.
package cache

import (
"context"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// FakePayloadCommitteeCache is a no-op implementation of the payload committee cache for fuzz builds.
type FakePayloadCommitteeCache struct{}

// NewPayloadCommitteeCache creates a new fake cache.
func NewPayloadCommitteeCache() *FakePayloadCommitteeCache {
return &FakePayloadCommitteeCache{}
}

// Get is a stub.
func (c *FakePayloadCommitteeCache) Get(_ context.Context, _ [32]byte) ([]primitives.ValidatorIndex, error) {
return nil, nil
}

// Add is a stub.
func (c *FakePayloadCommitteeCache) Add(_ [32]byte, _ []primitives.ValidatorIndex) {}

// MarkInProgress is a stub.
func (c *FakePayloadCommitteeCache) MarkInProgress(_ [32]byte) error {
return nil
}

// MarkNotInProgress is a stub.
func (c *FakePayloadCommitteeCache) MarkNotInProgress(_ [32]byte) error {
return nil
}

// Clear is a stub.
func (c *FakePayloadCommitteeCache) Clear() {}
94
beacon-chain/cache/payload_committee_test.go
vendored
@@ -1,94 +0,0 @@
//go:build !fuzz

package cache

import (
"context"
"strconv"
"testing"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestPayloadCommitteeCache_MissOnEmpty(t *testing.T) {
c := NewPayloadCommitteeCache()
seed := [32]byte{'A'}
indices, err := c.Get(t.Context(), seed)
require.NoError(t, err)
assert.Equal(t, true, indices == nil, "Expected nil on empty cache")
}

func TestPayloadCommitteeCache_AddThenHit(t *testing.T) {
c := NewPayloadCommitteeCache()
seed := [32]byte{'A'}
want := []primitives.ValidatorIndex{1, 2, 3, 4, 5}

c.Add(seed, want)

got, err := c.Get(t.Context(), seed)
require.NoError(t, err)
assert.DeepEqual(t, want, got)
}

func TestPayloadCommitteeCache_LRUEviction(t *testing.T) {
c := NewPayloadCommitteeCache()

// Fill beyond capacity.
for i := range maxPayloadCommitteeCacheSize + 10 {
s := bytesutil.ToBytes32([]byte(strconv.Itoa(i)))
c.Add(s, []primitives.ValidatorIndex{primitives.ValidatorIndex(i)})
}

// Oldest entries should be evicted.
s := bytesutil.ToBytes32([]byte(strconv.Itoa(0)))
got, err := c.Get(t.Context(), s)
require.NoError(t, err)
assert.Equal(t, true, got == nil, "Expected oldest entry to be evicted")

// Newest entry should still be present.
s = bytesutil.ToBytes32([]byte(strconv.Itoa(maxPayloadCommitteeCacheSize + 9)))
got, err = c.Get(t.Context(), s)
require.NoError(t, err)
assert.Equal(t, 1, len(got))
}

func TestPayloadCommitteeCache_CancelledContext(t *testing.T) {
c := NewPayloadCommitteeCache()
seed := [32]byte{'A'}

// Mark in progress so Get blocks.
require.NoError(t, c.MarkInProgress(seed))

ctx, cancel := context.WithCancel(t.Context())
cancel()

_, err := c.Get(ctx, seed)
require.ErrorIs(t, err, context.Canceled)

require.NoError(t, c.MarkNotInProgress(seed))
}

func TestPayloadCommitteeCache_MarkInProgressDuplicate(t *testing.T) {
c := NewPayloadCommitteeCache()
seed := [32]byte{'A'}

require.NoError(t, c.MarkInProgress(seed))
err := c.MarkInProgress(seed)
assert.Equal(t, ErrAlreadyInProgress, err)
require.NoError(t, c.MarkNotInProgress(seed))
}

func TestPayloadCommitteeCache_Clear(t *testing.T) {
c := NewPayloadCommitteeCache()
seed := [32]byte{'A'}
c.Add(seed, []primitives.ValidatorIndex{1, 2, 3})

c.Clear()

got, err := c.Get(t.Context(), seed)
require.NoError(t, err)
assert.Equal(t, true, got == nil, "Expected nil after Clear")
}
@@ -33,6 +33,8 @@ const (
	LightClientOptimisticUpdate
	// PayloadAttributes events are fired upon a missed slot or new head.
	PayloadAttributes
	// PayloadProcessed is sent after a payload envelope has been processed.
	PayloadProcessed
)

// BlockProcessedData is the data sent with BlockProcessed events.
@@ -72,3 +74,9 @@ type InitializedData struct {
	// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
	GenesisValidatorsRoot []byte
}

// PayloadProcessedData is the data sent with PayloadProcessed events.
type PayloadProcessedData struct {
	Slot      primitives.Slot
	BlockRoot [32]byte
}
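A hedged sketch of how a producer might publish the new event; the stateFeed variable and the feed.Event wrapper are assumptions based on how other state feed events are sent in Prysm, and the slot/root variables are hypothetical:

	stateFeed.Send(&feed.Event{
		Type: statefeed.PayloadProcessed,
		Data: &statefeed.PayloadProcessedData{
			Slot:      processedSlot,
			BlockRoot: processedRoot,
		},
	})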
@@ -12,11 +12,11 @@ go_library(
        "pending_payment.go",
        "proposer_slashing.go",
        "upgrade.go",
        "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/requests:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
@@ -53,6 +53,7 @@ go_test(
        "pending_payment_test.go",
        "proposer_slashing_test.go",
        "upgrade_test.go",
        "withdrawals_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -69,13 +70,11 @@ go_test(
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/validator-client:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
@@ -225,7 +225,20 @@ func ApplyExecutionPayload(
		return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
	}

	if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
	if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
		return err
	}

	return nil
}

func ApplyExecutionPayloadStateMutations(
	ctx context.Context,
	st state.BeaconState,
	executionRequests *enginev1.ExecutionRequests,
	blockHash [32]byte,
) error {
	if err := processExecutionRequests(ctx, st, executionRequests); err != nil {
		return errors.Wrap(err, "could not process execution requests")
	}

@@ -237,7 +250,7 @@ func ApplyExecutionPayload(
		return errors.Wrap(err, "could not set execution payload availability")
	}

	if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
	if err := st.SetLatestBlockHash(blockHash); err != nil {
		return errors.Wrap(err, "could not set latest block hash")
	}

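The split lets envelope-processing paths that already hold a block hash reuse the same state mutations without a full payload object. A minimal caller sketch, assuming only the signature introduced above; the env and blockHash variables are hypothetical:

	if err := ApplyExecutionPayloadStateMutations(ctx, st, env.ExecutionRequests(), blockHash); err != nil {
		return errors.Wrap(err, "could not apply execution payload state mutations")
	}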
@@ -7,7 +7,6 @@ import (
	"fmt"
	"slices"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -123,37 +122,6 @@ func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
		return nil, err
	}

	// Try cache, then acquire the in-progress lock. If another goroutine
	// is already computing, wait for it and retry. This loop ensures that
	// exactly one goroutine computes at a time, avoiding thundering herd
	// when a prior computation fails without populating the cache.
	for {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		cached, err := helpers.PayloadCommitteeFromCache(ctx, seed)
		if err != nil {
			return nil, err
		}
		if cached != nil {
			return cached, nil
		}
		if err := helpers.MarkPayloadCommitteeInProgress(seed); err != nil {
			if !errors.Is(err, cache.ErrAlreadyInProgress) {
				return nil, err
			}
			// Another goroutine is computing. PayloadCommitteeFromCache
			// will block (via checkInProgress backoff) until it finishes,
			// then we loop back to check the cache and retry.
			continue
		}
		// We own the in-progress lock.
		break
	}
	defer func() {
		_ = helpers.MarkPayloadCommitteeNotInProgress(seed)
	}()

	activeCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
	if err != nil {
		return nil, err
@@ -185,8 +153,6 @@ func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
		}
	}

	helpers.AddPayloadCommittee(seed, selected)

	return selected, nil
}

@@ -2,9 +2,7 @@ package gloas_test

import (
	"bytes"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
@@ -17,10 +15,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
	"github.com/OffchainLabs/prysm/v7/crypto/hash"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	testutil "github.com/OffchainLabs/prysm/v7/testing/util"
	"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -308,115 +303,3 @@ func (s *validatorLookupErrState) ValidatorAtIndexReadOnly(idx primitives.Valida
	}
	return s.BeaconState.ValidatorAtIndexReadOnly(idx)
}

// ptcSeed mirrors the seed derivation in PayloadCommittee so the test can
// pre-mark the seed as in-progress.
func ptcSeed(t *testing.T, st state.ReadOnlyBeaconState, slot primitives.Slot) [32]byte {
	epoch := slots.ToEpoch(slot)
	seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
	require.NoError(t, err)
	return hash.Hash(append(seed[:], bytesutil.Bytes8(uint64(slot))...))
}

// TestPayloadCommittee_ConcurrentInProgress verifies that when another
// goroutine holds the in-progress lock and then releases WITHOUT populating
// the cache (simulating a failed computation), PayloadCommittee falls through
// and computes the result itself instead of returning an error.
func TestPayloadCommittee_ConcurrentInProgress(t *testing.T) {
	helpers.ClearCache()
	setupTestConfig(t)

	_, pk1 := newKey(t)
	_, pk2 := newKey(t)
	vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
	st := newTestState(t, vals, 2)

	slot := primitives.Slot(1)
	seed := ptcSeed(t, st, slot)

	// Simulate another goroutine holding the lock.
	require.NoError(t, helpers.MarkPayloadCommitteeInProgress(seed))

	// Release the lock after a short delay WITHOUT adding to cache,
	// simulating the other goroutine failing.
	go func() {
		time.Sleep(20 * time.Millisecond)
		_ = helpers.MarkPayloadCommitteeNotInProgress(seed)
	}()

	// PayloadCommittee should wait, see no cache entry, and compute itself.
	ptc, err := gloas.PayloadCommittee(t.Context(), st, slot)
	require.NoError(t, err)
	assert.Equal(t, true, len(ptc) > 0, "expected non-empty PTC")
}

// TestPayloadCommittee_ConcurrentCacheHit verifies that when another goroutine
// holds the in-progress lock and then populates the cache, a concurrent caller
// gets the cached result.
func TestPayloadCommittee_ConcurrentCacheHit(t *testing.T) {
	helpers.ClearCache()
	setupTestConfig(t)

	_, pk1 := newKey(t)
	_, pk2 := newKey(t)
	vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
	st := newTestState(t, vals, 2)

	slot := primitives.Slot(1)
	seed := ptcSeed(t, st, slot)

	// First, compute the expected result.
	expected, err := gloas.PayloadCommittee(t.Context(), st, slot)
	require.NoError(t, err)
	helpers.ClearCache()

	// Simulate another goroutine that will populate the cache.
	require.NoError(t, helpers.MarkPayloadCommitteeInProgress(seed))
	go func() {
		time.Sleep(20 * time.Millisecond)
		helpers.AddPayloadCommittee(seed, expected)
		_ = helpers.MarkPayloadCommitteeNotInProgress(seed)
	}()

	ptc, err := gloas.PayloadCommittee(t.Context(), st, slot)
	require.NoError(t, err)
	assert.DeepEqual(t, expected, ptc)
}

// TestPayloadCommittee_ParallelCallers verifies that multiple concurrent
// callers all get the correct result without errors.
func TestPayloadCommittee_ParallelCallers(t *testing.T) {
	helpers.ClearCache()
	setupTestConfig(t)

	_, pk1 := newKey(t)
	_, pk2 := newKey(t)
	vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
	st := newTestState(t, vals, 2)

	slot := primitives.Slot(1)

	// Compute expected result first.
	expected, err := gloas.PayloadCommittee(t.Context(), st, slot)
	require.NoError(t, err)
	helpers.ClearCache()

	const numCallers = 8
	var wg sync.WaitGroup
	errs := make([]error, numCallers)
	results := make([][]primitives.ValidatorIndex, numCallers)

	for i := range numCallers {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			results[idx], errs[idx] = gloas.PayloadCommittee(t.Context(), st, slot)
		}(i)
	}

	wg.Wait()
	for i := range numCallers {
		require.NoError(t, errs[i], "caller %d returned error", i)
		assert.DeepEqual(t, expected, results[i], "caller %d got wrong result", i)
	}
}
@@ -242,6 +242,23 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
	require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}

func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
	fixture := buildPayloadFixture(t, nil)

	newHash := [32]byte{}
	newHash[0] = 0x99

	require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))

	latestHash, err := fixture.state.LatestBlockHash()
	require.NoError(t, err)
	require.Equal(t, newHash, latestHash)

	available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
	require.NoError(t, err)
	require.Equal(t, uint64(1), available)
}

func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
	fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
		bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
105
beacon-chain/core/gloas/withdrawals.go
Normal file
@@ -0,0 +1,105 @@
package gloas

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/pkg/errors"
)

// ProcessWithdrawals applies withdrawals to the state for Gloas.
//
// <spec fn="process_withdrawals" fork="gloas" hash="16d9ad2a">
// def process_withdrawals(
//     state: BeaconState,
//     # [Modified in Gloas:EIP7732]
//     # Removed `payload`
// ) -> None:
//     # [New in Gloas:EIP7732]
//     # Return early if the parent block is empty
//     if not is_parent_block_full(state):
//         return
//
//     # Get expected withdrawals
//     expected = get_expected_withdrawals(state)
//
//     # Apply expected withdrawals
//     apply_withdrawals(state, expected.withdrawals)
//
//     # Update withdrawals fields in the state
//     update_next_withdrawal_index(state, expected.withdrawals)
//     # [New in Gloas:EIP7732]
//     update_payload_expected_withdrawals(state, expected.withdrawals)
//     # [New in Gloas:EIP7732]
//     update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
//     update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
//     # [New in Gloas:EIP7732]
//     update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
//     update_next_withdrawal_validator_index(state, expected.withdrawals)
// </spec>
func ProcessWithdrawals(st state.BeaconState) error {
	// Must be called before ProcessExecutionPayloadBid for the current block.
	full, err := st.IsParentBlockFull()
	if err != nil {
		return errors.Wrap(err, "could not get parent block full status")
	}
	if !full {
		return nil
	}

	expected, err := st.ExpectedWithdrawalsGloas()
	if err != nil {
		return errors.Wrap(err, "could not get expected withdrawals")
	}

	if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
		return errors.Wrap(err, "could not decrease withdrawal balances")
	}

	if len(expected.Withdrawals) > 0 {
		if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
			return errors.Wrap(err, "could not set next withdrawal index")
		}
	}

	if err := st.SetPayloadExpectedWithdrawals(expected.Withdrawals); err != nil {
		return errors.Wrap(err, "could not set payload expected withdrawals")
	}

	if err := st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount); err != nil {
		return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
	}

	if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
		return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
	}

	err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
	if err != nil {
		return errors.Wrap(err, "could not set next withdrawal builder index")
	}

	var nextValidatorIndex primitives.ValidatorIndex
	if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
		nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
		if err != nil {
			return errors.Wrap(err, "could not get next withdrawal validator index")
		}
		nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
		nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
	} else {
		nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
		if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
			nextValidatorIndex = 0
		}
	}
	if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
		return errors.Wrap(err, "could not set next withdrawal validator index")
	}

	return nil
}
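The sweep-index update is the subtle part: a partial payload advances the cursor by the sweep bound and wraps modulo the registry size, while a full payload resumes just past the last withdrawn validator. A worked sketch with hypothetical numbers:

	// Hypothetical registry of 10 validators, sweep bound 4, cursor at 8:
	next := (primitives.ValidatorIndex(8) + 4) % 10 // == 2, wrapping past the end of the registry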
388
beacon-chain/core/gloas/withdrawals_test.go
Normal file
@@ -0,0 +1,388 @@
package gloas

import (
	"errors"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProcessWithdrawals(t *testing.T) {
	cases := []struct {
		name  string
		build func(t *testing.T) *withdrawalsState
		check func(t *testing.T, st *withdrawalsState)
	}{
		{
			name: "parent block not full",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState: newGloasState(t, nil, nil),
					parentFull:  false,
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.expectedCalled)
				require.Equal(t, false, st.decreaseCalled)
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
				require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, false, st.dequeueBuilderCalled)
				require.Equal(t, false, st.dequeuePartialCalled)
				require.Equal(t, false, st.setNextBuilderIndexCalled)
				require.Equal(t, false, st.nextValidatorIndexCalled)
				require.Equal(t, false, st.setNextValidatorIndexCalled)
			},
		},
		{
			name: "updates indexes when not full payload",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState:        newGloasState(t, nil, nil),
					parentFull:         true,
					numValidators:      10,
					nextValidatorIndex: 3,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals: []*enginev1.Withdrawal{
							{Index: 7, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
							{Index: 8, ValidatorIndex: 4, Amount: 2, Address: []byte{0x02}},
						},
						ProcessedBuilderWithdrawalsCount: 5,
						ProcessedPartialWithdrawalsCount: 2,
						NextWithdrawalBuilderIndex:       7,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.expectedCalled)
				require.Equal(t, true, st.decreaseCalled)
				require.NotNil(t, st.setNextWithdrawalIndexArg)
				require.Equal(t, uint64(9), *st.setNextWithdrawalIndexArg)
				require.DeepEqual(t, st.expectedResult.Withdrawals, st.setPayloadExpectedWithdrawalsArg)
				require.Equal(t, uint64(5), *st.dequeueBuilderArg)
				require.Equal(t, uint64(2), *st.dequeuePartialArg)
				require.Equal(t, primitives.BuilderIndex(7), *st.setNextBuilderIndexArg)
				require.Equal(t, true, st.nextValidatorIndexCalled)

				expectedNext := (uint64(st.nextValidatorIndex) + uint64(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)) % st.numValidators
				require.Equal(t, primitives.ValidatorIndex(expectedNext), *st.setNextValidatorIndexArg)
			},
		},
		{
			name: "full payload uses last validator index",
			build: func(t *testing.T) *withdrawalsState {
				max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
				withdrawals := make([]*enginev1.Withdrawal, max)
				for i := range max {
					withdrawals[i] = &enginev1.Withdrawal{
						Index:          uint64(i),
						ValidatorIndex: 0,
						Amount:         1,
						Address:        []byte{0x03},
					}
				}
				withdrawals[max-1].ValidatorIndex = 4

				return &withdrawalsState{
					BeaconState:   newGloasState(t, nil, nil),
					parentFull:    true,
					numValidators: 5,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals:                withdrawals,
						NextWithdrawalBuilderIndex: 1,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
				require.NotNil(t, st.setNextWithdrawalIndexArg)
				require.Equal(t, uint64(max), *st.setNextWithdrawalIndexArg)
				require.Equal(t, false, st.nextValidatorIndexCalled)
				require.Equal(t, primitives.ValidatorIndex(0), *st.setNextValidatorIndexArg)
			},
		},
		{
			name: "empty withdrawals skips next index update",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState:   newGloasState(t, nil, nil),
					parentFull:    true,
					numValidators: 8,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals:                      []*enginev1.Withdrawal{},
						ProcessedBuilderWithdrawalsCount: 1,
						ProcessedPartialWithdrawalsCount: 2,
						NextWithdrawalBuilderIndex:       4,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
				require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, true, st.dequeueBuilderCalled)
				require.Equal(t, true, st.dequeuePartialCalled)
				require.Equal(t, true, st.setNextBuilderIndexCalled)
				require.Equal(t, true, st.nextValidatorIndexCalled)
				require.Equal(t, true, st.setNextValidatorIndexCalled)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			st := tc.build(t)
			require.NoError(t, ProcessWithdrawals(st))
			if tc.check != nil {
				tc.check(t, st)
			}
		})
	}
}

func TestProcessWithdrawals_ErrorPaths(t *testing.T) {
	base := func(t *testing.T) *withdrawalsState {
		return &withdrawalsState{
			BeaconState:   newGloasState(t, nil, nil),
			parentFull:    true,
			numValidators: 16,
			expectedResult: state.ExpectedWithdrawalsGloasResult{
				Withdrawals: []*enginev1.Withdrawal{
					{Index: 1, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
				},
				ProcessedBuilderWithdrawalsCount: 1,
				ProcessedPartialWithdrawalsCount: 1,
				NextWithdrawalBuilderIndex:       2,
			},
			nextValidatorIndex: 5,
		}
	}

	cases := []struct {
		name  string
		err   error
		set   func(st *withdrawalsState, err error)
		check func(t *testing.T, st *withdrawalsState)
	}{
		{
			name: "parent block full error",
			err:  errors.New("parent err"),
			set: func(st *withdrawalsState, err error) {
				st.parentErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.expectedCalled)
			},
		},
		{
			name: "expected withdrawals error",
			err:  errors.New("expected err"),
			set: func(st *withdrawalsState, err error) {
				st.expectedErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.expectedCalled)
				require.Equal(t, false, st.decreaseCalled)
			},
		},
		{
			name: "decrease balances error",
			err:  errors.New("decrease err"),
			set: func(st *withdrawalsState, err error) {
				st.decreaseErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.decreaseCalled)
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
			},
		},
		{
			name: "set next withdrawal index error",
			err:  errors.New("next index err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextWithdrawalIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextWithdrawalIndexCalled)
				require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
			},
		},
		{
			name: "set payload expected withdrawals error",
			err:  errors.New("payload expected err"),
			set: func(st *withdrawalsState, err error) {
				st.setPayloadExpectedWithdrawalsErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, false, st.dequeueBuilderCalled)
			},
		},
		{
			name: "dequeue builder pending withdrawals error",
			err:  errors.New("dequeue builder err"),
			set: func(st *withdrawalsState, err error) {
				st.dequeueBuilderErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.dequeueBuilderCalled)
				require.Equal(t, false, st.dequeuePartialCalled)
			},
		},
		{
			name: "dequeue pending partial withdrawals error",
			err:  errors.New("dequeue partial err"),
			set: func(st *withdrawalsState, err error) {
				st.dequeuePartialErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.dequeuePartialCalled)
				require.Equal(t, false, st.setNextBuilderIndexCalled)
			},
		},
		{
			name: "set next withdrawal builder index error",
			err:  errors.New("next builder err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextBuilderIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextBuilderIndexCalled)
				require.Equal(t, false, st.nextValidatorIndexCalled)
			},
		},
		{
			name: "next withdrawal validator index error",
			err:  errors.New("next validator err"),
			set: func(st *withdrawalsState, err error) {
				st.nextValidatorIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.nextValidatorIndexCalled)
				require.Equal(t, false, st.setNextValidatorIndexCalled)
			},
		},
		{
			name: "set next withdrawal validator index error",
			err:  errors.New("set next validator err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextValidatorIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextValidatorIndexCalled)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			st := base(t)
			tc.set(st, tc.err)
			err := ProcessWithdrawals(st)
			require.ErrorIs(t, err, tc.err)
			if tc.check != nil {
				tc.check(t, st)
			}
		})
	}
}

type withdrawalsState struct {
	setNextValidatorIndexCalled         bool
	nextValidatorIndexCalled            bool
	setNextBuilderIndexCalled           bool
	dequeuePartialCalled                bool
	dequeueBuilderCalled                bool
	setPayloadExpectedWithdrawalsCalled bool
	setNextWithdrawalIndexCalled        bool
	parentFull                          bool
	expectedCalled                      bool
	decreaseCalled                      bool
	numValidators                       uint64
	setNextWithdrawalIndexArg           *uint64
	nextValidatorIndex                  primitives.ValidatorIndex
	setNextBuilderIndexArg              *primitives.BuilderIndex
	dequeuePartialArg                   *uint64
	setNextValidatorIndexArg            *primitives.ValidatorIndex
	dequeueBuilderArg                   *uint64
	state.BeaconState
	setNextValidatorIndexErr         error
	setNextBuilderIndexErr           error
	dequeuePartialErr                error
	dequeueBuilderErr                error
	setPayloadExpectedWithdrawalsErr error
	nextValidatorIndexErr            error
	decreaseErr                      error
	expectedErr                      error
	parentErr                        error
	setNextWithdrawalIndexErr        error
	setPayloadExpectedWithdrawalsArg []*enginev1.Withdrawal
	expectedResult                   state.ExpectedWithdrawalsGloasResult
}

func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
	return w.parentFull, w.parentErr
}

func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
	w.expectedCalled = true
	if w.expectedErr != nil {
		return state.ExpectedWithdrawalsGloasResult{}, w.expectedErr
	}
	return w.expectedResult, nil
}

func (w *withdrawalsState) DecreaseWithdrawalBalances(_ []*enginev1.Withdrawal) error {
	w.decreaseCalled = true
	return w.decreaseErr
}

func (w *withdrawalsState) SetNextWithdrawalIndex(index uint64) error {
	w.setNextWithdrawalIndexCalled = true
	w.setNextWithdrawalIndexArg = &index
	return w.setNextWithdrawalIndexErr
}

func (w *withdrawalsState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
	w.setPayloadExpectedWithdrawalsCalled = true
	w.setPayloadExpectedWithdrawalsArg = withdrawals
	return w.setPayloadExpectedWithdrawalsErr
}

func (w *withdrawalsState) DequeueBuilderPendingWithdrawals(n uint64) error {
	w.dequeueBuilderCalled = true
	w.dequeueBuilderArg = &n
	return w.dequeueBuilderErr
}

func (w *withdrawalsState) DequeuePendingPartialWithdrawals(n uint64) error {
	w.dequeuePartialCalled = true
	w.dequeuePartialArg = &n
	return w.dequeuePartialErr
}

func (w *withdrawalsState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
	w.setNextBuilderIndexCalled = true
	w.setNextBuilderIndexArg = &index
	return w.setNextBuilderIndexErr
}

func (w *withdrawalsState) NextWithdrawalValidatorIndex() (primitives.ValidatorIndex, error) {
	w.nextValidatorIndexCalled = true
	if w.nextValidatorIndexErr != nil {
		return 0, w.nextValidatorIndexErr
	}
	return w.nextValidatorIndex, nil
}

func (w *withdrawalsState) NumValidators() int {
	return int(w.numValidators)
}

func (w *withdrawalsState) SetNextWithdrawalValidatorIndex(index primitives.ValidatorIndex) error {
	w.setNextValidatorIndexCalled = true
	w.setNextValidatorIndexArg = &index
	return w.setNextValidatorIndexErr
}
@@ -27,9 +27,8 @@ import (
)

var (
	committeeCache        = cache.NewCommitteesCache()
	proposerIndicesCache  = cache.NewProposerIndicesCache()
	payloadCommitteeCache = cache.NewPayloadCommitteeCache()
	committeeCache       = cache.NewCommitteesCache()
	proposerIndicesCache = cache.NewProposerIndicesCache()
)

type beaconCommitteeFunc = func(
@@ -614,28 +613,6 @@ func ClearCache() {
	proposerIndicesCache.Prune(0)
	syncCommitteeCache.Clear()
	balanceCache.Clear()
	payloadCommitteeCache.Clear()
}

// PayloadCommitteeFromCache returns the cached payload committee for the given seed.
// Returns nil on cache miss.
func PayloadCommitteeFromCache(ctx context.Context, seed [32]byte) ([]primitives.ValidatorIndex, error) {
	return payloadCommitteeCache.Get(ctx, seed)
}

// AddPayloadCommittee stores a payload committee result in the cache.
func AddPayloadCommittee(seed [32]byte, indices []primitives.ValidatorIndex) {
	payloadCommitteeCache.Add(seed, indices)
}

// MarkPayloadCommitteeInProgress marks a payload committee seed as being computed.
func MarkPayloadCommitteeInProgress(seed [32]byte) error {
	return payloadCommitteeCache.MarkInProgress(seed)
}

// MarkPayloadCommitteeNotInProgress releases the in-progress lock on a payload committee seed.
func MarkPayloadCommitteeNotInProgress(seed [32]byte) error {
	return payloadCommitteeCache.MarkNotInProgress(seed)
}

// ComputeCommittee returns the requested shuffled committee out of the total committees using
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "electra.go",
        "errors.go",
        "gloas.go",
        "log.go",
        "skip_slot_cache.go",
        "state.go",
@@ -68,6 +69,7 @@ go_test(
        "benchmarks_test.go",
        "electra_test.go",
        "exports_test.go",
        "gloas_operations_test.go",
        "skip_slot_cache_test.go",
        "state_fuzz_test.go",
        "state_test.go",
@@ -94,6 +96,7 @@ go_test(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",

@@ -15,5 +15,6 @@ var (
	ErrProcessDepositsFailed            = errors.New("process deposits failed")
	ErrProcessVoluntaryExitsFailed      = errors.New("process voluntary exits failed")
	ErrProcessBLSChangesFailed          = errors.New("process BLS to execution changes failed")
	ErrProcessPayloadAttestationsFailed = errors.New("process payload attestations failed")
	ErrProcessSyncAggregateFailed       = errors.New("process sync aggregate failed")
)
@@ -1,3 +1,4 @@
package transition

var ElectraOperations = electraOperations
var GloasOperations = gloasOperations
226
beacon-chain/core/transition/gloas.go
Normal file
@@ -0,0 +1,226 @@
package transition

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/epoch/precompute"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/fulu"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/pkg/errors"
)

// ProcessSlotsForBlock advances the given state to the slot of the given block.
// This function assumes that the parent state is the latest state that has been processed before the given block.
// In particular, all that is needed to get the block's prestate is to advance slots and possible epoch transitions.
func ProcessSlotsForBlock(
	ctx context.Context,
	st state.BeaconState,
	b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	accessRoot := b.ParentRoot()
	if st.Version() < version.Gloas {
		return ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot[:], b.Slot())
	}
	full, err := st.IsParentBlockFull()
	if err != nil {
		return nil, errors.Wrap(err, "could not determine if parent block is full")
	}
	if full {
		accessRoot, err = st.LatestBlockHash()
		if err != nil {
			return nil, errors.Wrap(err, "could not get latest block hash")
		}
	}
	return ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot[:], b.Slot())
}

// ProcessOperations
//
// Spec definition:
//
// <spec fn="process_operations" fork="gloas" hash="05a7a4ea">
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
//     # Disable former deposit mechanism once all prior deposits are processed
//     eth1_deposit_index_limit = min(
//         state.eth1_data.deposit_count, state.deposit_requests_start_index
//     )
//     if state.eth1_deposit_index < eth1_deposit_index_limit:
//         assert len(body.deposits) == min(
//             MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index
//         )
//     else:
//         assert len(body.deposits) == 0
//
//     def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
//         for operation in operations:
//             fn(state, operation)
//
//     # [Modified in Gloas:EIP7732]
//     for_ops(body.proposer_slashings, process_proposer_slashing)
//     for_ops(body.attester_slashings, process_attester_slashing)
//     # [Modified in Gloas:EIP7732]
//     for_ops(body.attestations, process_attestation)
//     for_ops(body.deposits, process_deposit)
//     # [Modified in Gloas:EIP7732]
//     for_ops(body.voluntary_exits, process_voluntary_exit)
//     for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
//     # [Modified in Gloas:EIP7732]
//     # Removed `process_deposit_request`
//     # [Modified in Gloas:EIP7732]
//     # Removed `process_withdrawal_request`
//     # [Modified in Gloas:EIP7732]
//     # Removed `process_consolidation_request`
//     # [New in Gloas:EIP7732]
//     for_ops(body.payload_attestations, process_payload_attestation)
// </spec>
func gloasOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	var err error

	bb := block.Body()
	var exitInfo *v.ExitInfo
	hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
	hasExits := len(bb.VoluntaryExits()) > 0
	if hasSlashings || hasExits {
		// ExitInformation is expensive to compute, only do it if we need it.
		exitInfo = v.ExitInformation(st)
		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
			return nil, errors.Wrap(err, "could not update total active balance cache")
		}
	}
	st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
	if err != nil {
		return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
	}
	st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
	if err != nil {
		return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
	}
	st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
	if err != nil {
		return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
	}
	if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
		return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
	}
	st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
	if err != nil {
		return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
	}
	st, err = blocks.ProcessBLSToExecutionChanges(st, block)
	if err != nil {
		return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
	}
	if err := gloas.ProcessPayloadAttestations(ctx, st, bb); err != nil {
		return nil, errors.Wrap(ErrProcessPayloadAttestationsFailed, err.Error())
	}

	return st, nil
}

// processEpochGloas describes the per epoch operations that are performed on the beacon state.
// It's optimized by pre computing validator attested info and epoch total/attested balances upfront.
//
// Spec definition:
//
// <spec fn="process_epoch" fork="gloas" hash="393b69ef">
// def process_epoch(state: BeaconState) -> None:
//     process_justification_and_finalization(state)
//     process_inactivity_updates(state)
//     process_rewards_and_penalties(state)
//     process_registry_updates(state)
//     process_slashings(state)
//     process_eth1_data_reset(state)
//     process_pending_deposits(state)
//     process_pending_consolidations(state)
//     # [New in Gloas:EIP7732]
//     process_builder_pending_payments(state)
//     process_effective_balance_updates(state)
//     process_slashings_reset(state)
//     process_randao_mixes_reset(state)
//     process_historical_summaries_update(state)
//     process_participation_flag_updates(state)
//     process_sync_committee_updates(state)
//     process_proposer_lookahead(state)
// </spec>
func processEpochGloas(ctx context.Context, state state.BeaconState) error {
	_, span := trace.StartSpan(ctx, "gloas.ProcessEpoch")
	defer span.End()

	if state == nil || state.IsNil() {
		return errors.New("nil state")
	}
	vp, bp, err := electra.InitializePrecomputeValidators(ctx, state)
	if err != nil {
		return err
	}
	vp, bp, err = electra.ProcessEpochParticipation(ctx, state, bp, vp)
	if err != nil {
		return err
	}
	state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
	if err != nil {
		return errors.Wrap(err, "could not process justification")
	}
	state, vp, err = electra.ProcessInactivityScores(ctx, state, vp)
	if err != nil {
		return errors.Wrap(err, "could not process inactivity updates")
	}
	state, err = electra.ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
	if err != nil {
		return errors.Wrap(err, "could not process rewards and penalties")
	}
	if err := electra.ProcessRegistryUpdates(ctx, state); err != nil {
		return errors.Wrap(err, "could not process registry updates")
	}
	if err := electra.ProcessSlashings(state); err != nil {
		return err
	}
	state, err = electra.ProcessEth1DataReset(state)
	if err != nil {
		return err
	}
	if err = electra.ProcessPendingDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
		return err
	}
	if err = electra.ProcessPendingConsolidations(ctx, state); err != nil {
		return err
	}
	if err = gloas.ProcessBuilderPendingPayments(state); err != nil {
		return err
	}
	if err = electra.ProcessEffectiveBalanceUpdates(state); err != nil {
		return err
	}
	state, err = electra.ProcessSlashingsReset(state)
	if err != nil {
		return err
	}
	state, err = electra.ProcessRandaoMixesReset(state)
	if err != nil {
		return err
	}
	state, err = electra.ProcessHistoricalDataUpdate(state)
	if err != nil {
		return err
	}
	state, err = electra.ProcessParticipationFlagUpdates(state)
	if err != nil {
		return err
	}
	_, err = electra.ProcessSyncCommitteeUpdates(ctx, state)
	if err != nil {
		return err
	}
	if err := fulu.ProcessProposerLookahead(ctx, state); err != nil {
		return err
	}
	return nil
}
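Callers that previously derived the access root themselves now just hand the block over; a sketch of the intended call, assuming a signed block and its parent state (the variable names are hypothetical):

	st, err := transition.ProcessSlotsForBlock(ctx, parentState, signed.Block())
	if err != nil {
		return errors.Wrap(err, "could not process slots")
	}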
235
beacon-chain/core/transition/gloas_operations_test.go
Normal file
@@ -0,0 +1,235 @@
package transition_test

import (
	"context"
	"errors"
	"testing"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func newGloasBlock(t *testing.T, body *ethpb.BeaconBlockBodyGloas) interfaces.ReadOnlyBeaconBlock {
	t.Helper()
	hydrated := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
		Block: &ethpb.BeaconBlockGloas{Body: body},
	})
	signed, err := blocks.NewSignedBeaconBlock(hydrated)
	require.NoError(t, err)
	return signed.Block()
}

func emptyGloasBody() *ethpb.BeaconBlockBodyGloas {
	return util.HydrateBeaconBlockBodyGloas(nil)
}

func TestGloasOperations_HappyPath(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, 16)
	// A plain Electra state is fine here because we exercise zero operations.
	blk := newGloasBlock(t, emptyGloasBody())

	_, err := transition.GloasOperations(context.Background(), st, blk)
	require.NoError(t, err)
}

// TestGloasOperations_ProcessingErrors covers every sentinel error the
// function can return, one sub-test per operation step.
func TestGloasOperations_ProcessingErrors(t *testing.T) {
	tests := []struct {
		name        string
		modifyBlk   func(*ethpb.BeaconBlockBodyGloas)
		errSentinel error
		errSubstr   string
	}{
		{
			name: "ErrProcessProposerSlashingsFailed – out-of-bounds proposer index",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.ProposerSlashings = []*ethpb.ProposerSlashing{
					{
						Header_1: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999,
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
						Header_2: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999,
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
					},
				}
			},
			errSentinel: transition.ErrProcessProposerSlashingsFailed,
			errSubstr:   "process proposer slashings failed",
		},
		{
			name: "ErrProcessAttesterSlashingsFailed – out-of-bounds attesting index",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				makeIndexed := func(root []byte) *ethpb.IndexedAttestationElectra {
					return &ethpb.IndexedAttestationElectra{
						AttestingIndices: []uint64{999999},
						Data: &ethpb.AttestationData{
							Slot:            1,
							CommitteeIndex:  0,
							BeaconBlockRoot: root,
							Source:          &ethpb.Checkpoint{Root: make([]byte, 32)},
							Target:          &ethpb.Checkpoint{Root: make([]byte, 32)},
						},
						Signature: make([]byte, 96),
					}
				}
				root1 := make([]byte, 32)
				root2 := make([]byte, 32)
				root2[0] = 0xff // different roots → slashable
				b.AttesterSlashings = []*ethpb.AttesterSlashingElectra{
					{
						Attestation_1: makeIndexed(root1),
						Attestation_2: makeIndexed(root2),
					},
				}
			},
			errSentinel: transition.ErrProcessAttesterSlashingsFailed,
			errSubstr:   "process attester slashings failed",
		},
		{
			name: "ErrProcessAttestationsFailed – invalid committee index",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.Attestations = []*ethpb.AttestationElectra{
					{
						AggregationBits: []byte{0b00000001},
						Data: &ethpb.AttestationData{
							Slot:            1,
							CommitteeIndex:  999999, // no such committee
							BeaconBlockRoot: make([]byte, 32),
							Source:          &ethpb.Checkpoint{Root: make([]byte, 32)},
							Target:          &ethpb.Checkpoint{Root: make([]byte, 32)},
						},
						CommitteeBits: []byte{0b00000001},
						Signature:     make([]byte, 96),
					},
				}
			},
			errSentinel: transition.ErrProcessAttestationsFailed,
			errSubstr:   "process attestations failed",
		},
		{
			name: "ErrProcessDepositsFailed – empty merkle proof",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.Deposits = []*ethpb.Deposit{
					{
						Proof: [][]byte{}, // invalid: proof must not be empty
						Data: &ethpb.Deposit_Data{
							PublicKey:             make([]byte, 48),
							WithdrawalCredentials: make([]byte, 32),
							Amount:                32_000_000_000,
							Signature:             make([]byte, 96),
						},
					},
				}
			},
			errSentinel: transition.ErrProcessDepositsFailed,
			errSubstr:   "process deposits failed",
		},
		{
			name: "ErrProcessVoluntaryExitsFailed – out-of-bounds validator index",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
					{
						Exit: &ethpb.VoluntaryExit{
							Epoch:          0,
							ValidatorIndex: 999999,
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errSentinel: transition.ErrProcessVoluntaryExitsFailed,
			errSubstr:   "process voluntary exits failed",
		},
		{
			name: "ErrProcessBLSChangesFailed – out-of-bounds validator index",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
					{
						Message: &ethpb.BLSToExecutionChange{
							ValidatorIndex:     999999,
							FromBlsPubkey:      make([]byte, 48),
							ToExecutionAddress: make([]byte, 20),
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errSentinel: transition.ErrProcessBLSChangesFailed,
			errSubstr:   "process BLS to execution changes failed",
		},
		{
			name: "ErrProcessPayloadAttestationsFailed – wrong beacon block root",
			modifyBlk: func(b *ethpb.BeaconBlockBodyGloas) {
				b.PayloadAttestations = []*ethpb.PayloadAttestation{
					{
						AggregationBits: bitfield.NewBitvector512(),
						Data: &ethpb.PayloadAttestationData{
							BeaconBlockRoot: make([]byte, 32), // all-zeros ≠ header.parent_root
							Slot:            0,
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errSentinel: transition.ErrProcessPayloadAttestationsFailed,
			errSubstr:   "process payload attestations failed",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()

			st, _ := util.DeterministicGenesisStateElectra(t, 128)

			// For the payload-attestation sub-test we need the state's latest block
			// header to have a non-zero parent root so the all-zeros root in the
			// attestation definitely mismatches.
			if tc.errSentinel == transition.ErrProcessPayloadAttestationsFailed {
				hdr := &ethpb.BeaconBlockHeader{
					ParentRoot: make([]byte, 32),
					StateRoot:  make([]byte, 32),
					BodyRoot:   make([]byte, 32),
				}
				hdr.ParentRoot[0] = 0xde
				require.NoError(t, st.SetLatestBlockHeader(hdr))
			}

			body := emptyGloasBody()
			tc.modifyBlk(body)

			gloasBlk := newGloasBlock(t, body)

			_, err := transition.GloasOperations(ctx, st, gloasBlk)
			require.NotNil(t, err, "expected an error but got nil")
			require.ErrorContains(t, tc.errSubstr, err)
			require.Equal(t, true, errors.Is(err, tc.errSentinel))
		})
	}
}
@@ -317,7 +317,11 @@ func ProcessSlotsCore(ctx context.Context, span trace.Span, state state.BeaconSt
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
	var err error
	if time.CanProcessEpoch(state) {
		if state.Version() >= version.Fulu {
		if state.Version() >= version.Gloas {
			if err = processEpochGloas(ctx, state); err != nil {
				return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
			}
		} else if state.Version() >= version.Fulu {
			if err = fulu.ProcessEpoch(ctx, state); err != nil {
				return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
			}
@@ -73,12 +73,12 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
		ExecutionPayloadAvailability: availability,
		BuilderPendingPayments:       make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
		LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
			ParentBlockHash:    make([]byte, 32),
			ParentBlockRoot:    make([]byte, 32),
			BlockHash:          make([]byte, 32),
			PrevRandao:         make([]byte, 32),
			FeeRecipient:       make([]byte, 20),
			BlobKzgCommitments: [][]byte{make([]byte, 48)},
			ParentBlockHash:    make([]byte, 32),
			ParentBlockRoot:    make([]byte, 32),
			BlockHash:          make([]byte, 32),
			PrevRandao:         make([]byte, 32),
			FeeRecipient:       make([]byte, 20),
			BlobKzgCommitments: [][]byte{make([]byte, 48)},
		},
		Eth1Data: &ethpb.Eth1Data{
			DepositRoot: make([]byte, 32),
@@ -7,6 +7,7 @@ import (

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
	b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
@@ -62,8 +63,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
	interop.WriteBlockToDisk(signed, false /* Has the block failed */)
	interop.WriteStateToDisk(st)

	parentRoot := signed.Block().ParentRoot()
	st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
	st, err = ProcessSlotsForBlock(ctx, st, signed.Block())
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not process slots")
	}
@@ -134,8 +134,7 @@ func CalculateStateRoot(

	// Execute per slots transition.
	var err error
	parentRoot := signed.Block().ParentRoot()
	state, err = ProcessSlotsUsingNextSlotCache(ctx, state, parentRoot[:], signed.Block().Slot())
	state, err = ProcessSlotsForBlock(ctx, state, signed.Block())
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "could not process slots")
	}
@@ -356,11 +355,16 @@ func ProcessOperationsNoVerifyAttsSigs(
		if err != nil {
			return nil, err
		}
	} else {
	} else if beaconBlock.Version() < version.Gloas {
		state, err = electraOperations(ctx, state, beaconBlock)
		if err != nil {
			return nil, err
		}
	} else {
		state, err = gloasOperations(ctx, state, beaconBlock)
		if err != nil {
			return nil, err
		}
	}

	return state, nil
@@ -403,23 +407,47 @@ func ProcessBlockForStateRoot(
		return nil, errors.Wrap(err, "could not process block header")
	}

	enabled, err := b.IsExecutionEnabled(state, blk.Body())
	if err != nil {
		return nil, errors.Wrap(err, "could not check if execution is enabled")
	}
	if enabled {
		executionData, err := blk.Body().Execution()
	if state.Version() >= version.Gloas {
		// <spec fn="process_block" fork="gloas" hash="cc0f05ee">
		// def process_block(state: BeaconState, block: BeaconBlock) -> None:
		//     process_block_header(state, block)
		//     # [Modified in Gloas:EIP7732]
		//     process_withdrawals(state)
		//     # [Modified in Gloas:EIP7732]
		//     # Removed `process_execution_payload`
		//     # [New in Gloas:EIP7732]
		//     process_execution_payload_bid(state, block)
		//     process_randao(state, block.body)
		//     process_eth1_data(state, block.body)
		//     # [Modified in Gloas:EIP7732]
		//     process_operations(state, block.body)
		//     process_sync_aggregate(state, block.body.sync_aggregate)
		// </spec>
		if err := gloas.ProcessWithdrawals(state); err != nil {
			return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
		}
		if err := gloas.ProcessExecutionPayloadBid(state, blk); err != nil {
			return nil, errors.Wrap(err, "could not process execution payload bid")
		}
	} else {
		enabled, err := b.IsExecutionEnabled(state, blk.Body())
		if err != nil {
			return nil, err
			return nil, errors.Wrap(err, "could not check if execution is enabled")
		}
		if state.Version() >= version.Capella {
			state, err = b.ProcessWithdrawals(state, executionData)
		if enabled {
			executionData, err := blk.Body().Execution()
			if err != nil {
				return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
				return nil, err
			}
			if state.Version() >= version.Capella {
				state, err = b.ProcessWithdrawals(state, executionData)
				if err != nil {
					return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
				}
			}
			if err = b.ProcessPayload(state, blk.Body()); err != nil {
				return nil, errors.Wrap(err, "could not process execution data")
			}
		}
		if err = b.ProcessPayload(state, blk.Body()); err != nil {
			return nil, errors.Wrap(err, "could not process execution data")
		}
	}

@@ -557,6 +557,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
}
|
||||
|
||||
switch {
|
||||
case hasGloasKey(enc):
|
||||
protoState := ðpb.BeaconStateGloas{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(gloasKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Gloas")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
protoState.Validators = validatorEntries
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeGloas(protoState)
|
||||
case hasFuluKey(enc):
|
||||
protoState := ðpb.BeaconStateFulu{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
@@ -742,6 +755,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(fuluKey, rawObj...)), nil
|
||||
case version.Gloas:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
if rState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
rawObj, err := rState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(gloasKey, rawObj...)), nil
|
||||
default:
|
||||
return nil, errors.New("invalid inner state")
|
||||
}
|
||||
|
||||
@@ -134,8 +134,15 @@ type Reconstructor interface {
|
||||
ReconstructFullBellatrixBlockBatch(
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructFullExecutionPayloadByHash(
|
||||
ctx context.Context, blockHash [32]byte,
|
||||
) (*pb.ExecutionPayloadDeneb, error)
|
||||
ReconstructFullExecutionPayloadsByHash(
|
||||
ctx context.Context, blockHashes [][32]byte,
|
||||
) (map[[32]byte]*pb.ExecutionPayloadDeneb, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
|
||||
ReconstructExecutionPayloadEnvelope(ctx context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope) (*ethpb.SignedExecutionPayloadEnvelope, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -646,6 +653,186 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
return unb, nil
|
||||
}
|
||||
|
||||
// ReconstructExecutionPayloadEnvelope takes a blinded execution payload envelope and
|
||||
// reconstructs the full envelope by fetching the execution payload from the EL via
|
||||
// eth_getBlockByHash.
|
||||
func (s *Service) ReconstructExecutionPayloadEnvelope(
|
||||
ctx context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope,
|
||||
) (*ethpb.SignedExecutionPayloadEnvelope, error) {
|
||||
if envelope == nil || envelope.Message == nil {
|
||||
return nil, errors.New("nil blinded execution payload envelope")
|
||||
}
|
||||
blockHash := bytesutil.ToBytes32(envelope.Message.BlockHash)
|
||||
payload, err := s.ReconstructFullExecutionPayloadByHash(ctx, blockHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not reconstruct execution payload")
|
||||
}
|
||||
return ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: envelope.Message.ExecutionRequests,
|
||||
BuilderIndex: envelope.Message.BuilderIndex,
|
||||
BeaconBlockRoot: envelope.Message.BeaconBlockRoot,
|
||||
Slot: envelope.Message.Slot,
|
||||
StateRoot: envelope.Message.StateRoot,
|
||||
},
|
||||
Signature: envelope.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReconstructFullExecutionPayloadByHash reconstructs a full deneb payload from EL data by block hash.
|
||||
func (s *Service) ReconstructFullExecutionPayloadByHash(
|
||||
ctx context.Context, blockHash [32]byte,
|
||||
) (*pb.ExecutionPayloadDeneb, error) {
|
||||
payloads, err := s.ReconstructFullExecutionPayloadsByHash(ctx, [][32]byte{blockHash})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, ok := payloads[blockHash]
|
||||
if !ok || payload == nil {
|
||||
return nil, errors.New("execution payload not found")
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// ReconstructFullExecutionPayloadsByHash reconstructs full deneb payloads from EL data by block hashes.
|
||||
func (s *Service) ReconstructFullExecutionPayloadsByHash(
|
||||
ctx context.Context, blockHashes [][32]byte,
|
||||
) (map[[32]byte]*pb.ExecutionPayloadDeneb, error) {
|
||||
payloads := make(map[[32]byte]*pb.ExecutionPayloadDeneb, len(blockHashes))
|
||||
if len(blockHashes) == 0 {
|
||||
return payloads, nil
|
||||
}
|
||||
|
||||
uniqueSet := make(map[[32]byte]struct{}, len(blockHashes))
|
||||
uniqueHashes := make([][32]byte, 0, len(blockHashes))
|
||||
for i := range blockHashes {
|
||||
h := blockHashes[i]
|
||||
if _, ok := uniqueSet[h]; ok {
|
||||
continue
|
||||
}
|
||||
uniqueSet[h] = struct{}{}
|
||||
uniqueHashes = append(uniqueHashes, h)
|
||||
}
|
||||
|
||||
requestHashes := make([]common.Hash, 0, len(uniqueHashes))
|
||||
for i := range uniqueHashes {
|
||||
if uniqueHashes[i] == params.BeaconConfig().ZeroHash {
|
||||
// Empty execution payload.
|
||||
payloads[uniqueHashes[i]] = &pb.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}
|
||||
continue
|
||||
}
|
||||
requestHashes = append(requestHashes, uniqueHashes[i])
|
||||
}
|
||||
|
||||
blocksByHash := make(map[[32]byte]*pb.ExecutionBlock, len(requestHashes))
|
||||
if len(requestHashes) > 0 {
|
||||
execBlocks, err := s.ExecutionBlocksByHashes(ctx, requestHashes, true) // with txs
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range requestHashes {
|
||||
blocksByHash[requestHashes[i]] = execBlocks[i]
|
||||
}
|
||||
}
|
||||
|
||||
for i := range uniqueHashes {
|
||||
h := uniqueHashes[i]
|
||||
if _, ok := payloads[h]; ok {
|
||||
continue
|
||||
}
|
||||
blk := blocksByHash[h]
|
||||
payload, err := executionPayloadDenebFromExecutionBlock(h, blk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloads[h] = payload
|
||||
}
|
||||
|
||||
return payloads, nil
|
||||
}
|
||||
|
||||
func executionPayloadDenebFromExecutionBlock(
|
||||
requestedHash [32]byte, blk *pb.ExecutionBlock,
|
||||
) (*pb.ExecutionPayloadDeneb, error) {
|
||||
if requestedHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, errors.New("zero hash must be handled before block conversion")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, errors.New("execution block not found")
|
||||
}
|
||||
if blk.Hash == (common.Hash{}) {
|
||||
return nil, errors.New("execution block not found")
|
||||
}
|
||||
if blk.Hash != requestedHash {
|
||||
return nil, errors.New("execution block hash mismatch")
|
||||
}
|
||||
if blk.Number == nil {
|
||||
return nil, errors.New("execution block number is nil")
|
||||
}
|
||||
if blk.BaseFee == nil {
|
||||
return nil, errors.New("execution block base fee is nil")
|
||||
}
|
||||
|
||||
txs := make([][]byte, 0, len(blk.Transactions))
|
||||
for i := range blk.Transactions {
|
||||
if blk.Transactions[i] == nil {
|
||||
return nil, errors.New("nil transaction in execution block")
|
||||
}
|
||||
txBytes, err := blk.Transactions[i].MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal execution transaction")
|
||||
}
|
||||
txs = append(txs, txBytes)
|
||||
}
|
||||
|
||||
var blobGasUsed uint64
|
||||
if blk.BlobGasUsed == nil {
|
||||
return nil, errors.New("execution block blob gas used is nil")
|
||||
}
|
||||
blobGasUsed = *blk.BlobGasUsed
|
||||
var excessBlobGas uint64
|
||||
if blk.ExcessBlobGas == nil {
|
||||
return nil, errors.New("execution block excess blob gas is nil")
|
||||
}
|
||||
excessBlobGas = *blk.ExcessBlobGas
|
||||
withdrawals := blk.Withdrawals
|
||||
if withdrawals == nil {
|
||||
withdrawals = make([]*pb.Withdrawal, 0)
|
||||
}
|
||||
|
||||
return &pb.ExecutionPayloadDeneb{
|
||||
ParentHash: blk.ParentHash.Bytes(),
|
||||
FeeRecipient: blk.Coinbase.Bytes(),
|
||||
StateRoot: blk.Root.Bytes(),
|
||||
ReceiptsRoot: blk.ReceiptHash.Bytes(),
|
||||
LogsBloom: blk.Bloom.Bytes(),
|
||||
PrevRandao: blk.MixDigest.Bytes(),
|
||||
BlockNumber: blk.Number.Uint64(),
|
||||
GasLimit: blk.GasLimit,
|
||||
GasUsed: blk.GasUsed,
|
||||
Timestamp: blk.Time,
|
||||
ExtraData: blk.Extra,
|
||||
BaseFeePerGas: bytesutil.PadTo(bytesutil.ReverseByteOrder(blk.BaseFee.Bytes()), fieldparams.RootLength),
|
||||
BlockHash: blk.Hash.Bytes(),
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobSidecars reconstructs the verified blob sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
|
||||
// and constructs the corresponding verified read-only blob sidecars.
|
||||
|
||||
@@ -91,7 +91,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
|
||||
elCommit: "abcd1234",
|
||||
userGraffiti: []byte("1234567890123456789"), // 19 chars, leaves 13 bytes = full format + space
|
||||
wantPrefix: "1234567890123456789",
|
||||
wantSuffix: " GEabcdPR",
|
||||
wantSuffix: " GEabcdPR",
|
||||
},
|
||||
{
|
||||
name: "With EL - reduced commits (24 char user, 8 bytes available) - no space, would reduce tier",
|
||||
@@ -99,7 +99,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
|
||||
elCommit: "abcd1234",
|
||||
userGraffiti: []byte("123456789012345678901234"), // 24 chars, leaves exactly 8 bytes = reduced format, no room for space
|
||||
wantPrefix: "123456789012345678901234",
|
||||
wantSuffix: "GEabPR",
|
||||
wantSuffix: "GEabPR",
|
||||
},
|
||||
{
|
||||
name: "With EL - reduced commits (23 char user, 9 bytes available) - space fits",
|
||||
@@ -107,7 +107,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
|
||||
elCommit: "abcd1234",
|
||||
userGraffiti: []byte("12345678901234567890123"), // 23 chars, leaves 9 bytes = reduced format + space
|
||||
wantPrefix: "12345678901234567890123",
|
||||
wantSuffix: " GEabPR",
|
||||
wantSuffix: " GEabPR",
|
||||
},
|
||||
{
|
||||
name: "With EL - codes only (28 char user, 4 bytes available) - no space, would reduce tier",
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
pb "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
@@ -115,6 +116,52 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
|
||||
return fullBlocks, nil
|
||||
}
|
||||
|
||||
// ReconstructFullExecutionPayloadByHash --
|
||||
func (e *EngineClient) ReconstructFullExecutionPayloadByHash(
|
||||
_ context.Context, blockHash [32]byte,
|
||||
) (*pb.ExecutionPayloadDeneb, error) {
|
||||
if p, ok := e.ExecutionPayloadByBlockHash[blockHash]; ok {
|
||||
return &pb.ExecutionPayloadDeneb{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
BlockNumber: p.BlockNumber,
|
||||
GasLimit: p.GasLimit,
|
||||
GasUsed: p.GasUsed,
|
||||
Timestamp: p.Timestamp,
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas,
|
||||
BlockHash: p.BlockHash,
|
||||
Transactions: p.Transactions,
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}, nil
|
||||
}
|
||||
if e.GetPayloadResponse != nil && e.GetPayloadResponse.ExecutionData != nil {
|
||||
if p, ok := e.GetPayloadResponse.ExecutionData.Proto().(*pb.ExecutionPayloadDeneb); ok {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("payload not found")
|
||||
}
|
||||
|
||||
// ReconstructFullExecutionPayloadsByHash --
|
||||
func (e *EngineClient) ReconstructFullExecutionPayloadsByHash(
|
||||
_ context.Context, blockHashes [][32]byte,
|
||||
) (map[[32]byte]*pb.ExecutionPayloadDeneb, error) {
|
||||
payloads := make(map[[32]byte]*pb.ExecutionPayloadDeneb, len(blockHashes))
|
||||
for i := range blockHashes {
|
||||
p, err := e.ReconstructFullExecutionPayloadByHash(context.Background(), blockHashes[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloads[blockHashes[i]] = p
|
||||
}
|
||||
return payloads, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
|
||||
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
@@ -125,6 +172,49 @@ func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.Cons
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// ReconstructExecutionPayloadEnvelope --
|
||||
func (e *EngineClient) ReconstructExecutionPayloadEnvelope(
|
||||
_ context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope,
|
||||
) (*ethpb.SignedExecutionPayloadEnvelope, error) {
|
||||
if e.Err != nil {
|
||||
return nil, e.Err
|
||||
}
|
||||
payload, ok := e.ExecutionPayloadByBlockHash[bytesutil.ToBytes32(envelope.Message.BlockHash)]
|
||||
if !ok {
|
||||
return nil, errors.New("execution payload not found for block hash")
|
||||
}
|
||||
return ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: payloadToPayloadDeneb(payload),
|
||||
ExecutionRequests: envelope.Message.ExecutionRequests,
|
||||
BuilderIndex: envelope.Message.BuilderIndex,
|
||||
BeaconBlockRoot: envelope.Message.BeaconBlockRoot,
|
||||
Slot: envelope.Message.Slot,
|
||||
StateRoot: envelope.Message.StateRoot,
|
||||
},
|
||||
Signature: envelope.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func payloadToPayloadDeneb(p *pb.ExecutionPayload) *pb.ExecutionPayloadDeneb {
|
||||
return &pb.ExecutionPayloadDeneb{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
BlockNumber: p.BlockNumber,
|
||||
GasLimit: p.GasLimit,
|
||||
GasUsed: p.GasUsed,
|
||||
Timestamp: p.Timestamp,
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas,
|
||||
BlockHash: p.BlockHash,
|
||||
Transactions: p.Transactions,
|
||||
}
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -43,6 +43,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -69,6 +70,7 @@ go_test(
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
@@ -82,5 +84,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -68,6 +68,7 @@ func (f *ForkChoice) Head(
|
||||
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
|
||||
}
|
||||
f.store.removeProposerBoostFromParent()
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
@@ -623,7 +624,8 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
|
||||
f.balancesByRoot = handler
|
||||
}
|
||||
|
||||
// Weight returns the weight of the given root if found on the store
|
||||
// Weight returns the payload-node weight of the given root if found on the store.
|
||||
// For Gloas, this is the node weight used for forkchoice on the payload tree.
|
||||
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
@@ -632,6 +634,30 @@ func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
return n.weight, nil
|
||||
}
|
||||
|
||||
// ConsensusNodeWeight returns the consensus-node weight for the given root if found on the store.
|
||||
// For Gloas blocks, this includes both empty and full payload node weights.
|
||||
func (f *ForkChoice) ConsensusNodeWeight(root [32]byte) (uint64, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
return n.node.weight, nil
|
||||
}
|
||||
|
||||
// PayloadWeights returns the empty and full payload node weights for the given root.
|
||||
func (f *ForkChoice) PayloadWeights(root [32]byte) (emptyWeight, fullWeight uint64, err error) {
|
||||
en, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
return 0, 0, ErrNilNode
|
||||
}
|
||||
emptyWeight = en.weight
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
if fn != nil {
|
||||
fullWeight = fn.weight
|
||||
}
|
||||
return emptyWeight, fullWeight, nil
|
||||
}
|
||||
|
||||
// updateJustifiedBalances updates the validators balances on the justified checkpoint pointed by root.
|
||||
func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte) error {
|
||||
balances, err := f.balancesByRoot(ctx, root)
|
||||
|
||||
@@ -6,14 +6,40 @@ import (
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// CanonicalNodeAtSlot returns the full node that exists at the given slot in the canonical chain.
|
||||
// The boolean indicates whether the payload was present for the returned blockroot.
|
||||
// If the slot for the given blockroot is the current wall clock slot, it returns the pending node, that is, it sets the boolean to false.
|
||||
// If the slot is in the past the boolean indicates between full or empty.
|
||||
func (f *ForkChoice) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
|
||||
s := f.store
|
||||
n := s.headNode
|
||||
for n != nil && n.slot > slot {
|
||||
if n.parent == nil {
|
||||
n = nil
|
||||
} else {
|
||||
n = n.parent.node
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
return [32]byte{}, false
|
||||
}
|
||||
if n.slot == s.currentSlot() {
|
||||
return n.root, false
|
||||
}
|
||||
pn := s.choosePayloadContent(n)
|
||||
return pn.node.root, pn.full
|
||||
}
|
||||
|
||||
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
|
||||
sb, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
@@ -212,24 +238,58 @@ func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node,
|
||||
n.bestDescendant = en.bestDescendant
|
||||
return nil
|
||||
}
|
||||
// TODO Gloas: pick between full or empty
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
n.bestDescendant = fn.bestDescendant
|
||||
n.bestDescendant = s.choosePayloadContent(n).bestDescendant
|
||||
return nil
|
||||
}
|
||||
|
||||
// choosePayloadContent chooses between empty or full for the passed consensus node. TODO Gloas: use PTC to choose.
|
||||
func (s *Store) currentSlot() primitives.Slot {
|
||||
return slots.CurrentSlot(s.genesisTime)
|
||||
}
|
||||
|
||||
func (s *Store) shouldExtendPayload(fn *PayloadNode) bool {
|
||||
if fn == nil {
|
||||
return false
|
||||
}
|
||||
n := fn.node
|
||||
if n.payloadAvailabilityVote.Count() > fieldparams.PTCSize/2 && n.payloadDataAvailabilityVote.Count() > fieldparams.PTCSize/2 {
|
||||
return true
|
||||
}
|
||||
if s.proposerBoostRoot == [32]byte{} {
|
||||
return true
|
||||
}
|
||||
pn := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if pn == nil {
|
||||
return true
|
||||
}
|
||||
if pn.node.parent.node != fn.node {
|
||||
return true
|
||||
}
|
||||
return pn.node.parent.full
|
||||
}
|
||||
|
||||
// choosePayloadContent chooses between empty or full for the passed consensus node.
|
||||
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if fn == nil {
|
||||
return en
|
||||
}
|
||||
if fn.weight > en.weight {
|
||||
return fn
|
||||
} else if fn.weight < en.weight {
|
||||
return en
|
||||
}
|
||||
previousSlot := n.slot+1 == s.currentSlot()
|
||||
if !previousSlot || s.shouldExtendPayload(fn) {
|
||||
return fn
|
||||
}
|
||||
return s.emptyNodeByRoot[n.root]
|
||||
return en
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
@@ -325,6 +385,36 @@ func (f *ForkChoice) updateNewFullNodeWeight(fn *PayloadNode) {
|
||||
fn.weight = fn.balance
|
||||
}
|
||||
|
||||
// SetPTCVote sets the PTC vote bits on the consensus node identified by root.
|
||||
func (f *ForkChoice) SetPTCVote(root [32]byte, ptcIdx uint64, payloadPresent, blobDataAvailable bool) {
|
||||
n := f.store.emptyNodeByRoot[root]
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
if payloadPresent {
|
||||
n.node.setPayloadAvailabilityVote(ptcIdx)
|
||||
}
|
||||
if blobDataAvailable {
|
||||
n.node.setPayloadDataAvailabilityVote(ptcIdx)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) setPayloadAvailabilityVote(idx uint64) {
|
||||
n.payloadAvailabilityVote.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
func (n *Node) setPayloadDataAvailabilityVote(idx uint64) {
|
||||
n.payloadDataAvailabilityVote.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
func (n *Node) payloadAvailabilityVoteCount() uint64 {
|
||||
return n.payloadAvailabilityVote.Count()
|
||||
}
|
||||
|
||||
func (n *Node) payloadDataAvailabilityVoteCount() uint64 {
|
||||
return n.payloadDataAvailabilityVote.Count()
|
||||
}
|
||||
|
||||
// resolveVoteNode returns the node that should receive the balance of a vote. It returns always a PayloadNode, but the boolean indicates
|
||||
// whether the vote should be applied to the pending node (true) or not.
|
||||
func (s *Store) resolveVoteNode(r [32]byte, slot primitives.Slot, payloadStatus bool) (*PayloadNode, bool) {
|
||||
@@ -338,6 +428,12 @@ func (s *Store) resolveVoteNode(r [32]byte, slot primitives.Slot, payloadStatus
|
||||
return en, slot == en.node.slot
|
||||
}
|
||||
|
||||
// HasFullNode returns true if a full (payload) node exists for the given beacon block root.
|
||||
func (f *ForkChoice) HasFullNode(root [32]byte) bool {
|
||||
_, ok := f.store.fullNodeByRoot[root]
|
||||
return ok
|
||||
}
|
||||
|
||||
// BlockHash returns the hash committed in the given block
|
||||
func (f *ForkChoice) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
s := f.store
|
||||
@@ -347,3 +443,49 @@ func (f *ForkChoice) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
}
|
||||
return en.node.blockHash, nil
|
||||
}
|
||||
|
||||
func (s *Store) shouldApplyProposerBoost() bool {
|
||||
if s.proposerBoostRoot == [32]byte{} {
|
||||
return false
|
||||
}
|
||||
if slots.ToEpoch(s.currentSlot()) < params.BeaconConfig().GloasForkEpoch {
|
||||
return true
|
||||
}
|
||||
en := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if en == nil {
|
||||
return false
|
||||
}
|
||||
n := en.node
|
||||
p := n.parent
|
||||
if p == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.node.slot+1 != n.slot {
|
||||
return true
|
||||
}
|
||||
return p.weight*100 >= s.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold
|
||||
}
|
||||
|
||||
// removeProposerBoostFromParent removes the proposer boost that must have been applied to the parent of the current proposer boost node
|
||||
// in some circumstances.
|
||||
func (s *Store) removeProposerBoostFromParent() {
|
||||
if s.proposerBoostRoot == [32]byte{} {
|
||||
return
|
||||
}
|
||||
pn := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if pn == nil {
|
||||
return
|
||||
}
|
||||
n := pn.node
|
||||
p := n.parent
|
||||
if p.node.slot+1 != s.currentSlot() {
|
||||
return
|
||||
}
|
||||
if p.weight < s.previousProposerBoostScore {
|
||||
p.weight = 0
|
||||
} else {
|
||||
p.weight -= s.previousProposerBoostScore
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -10,29 +10,49 @@ import (
|
||||
func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
s := f.store
|
||||
proposerScore := uint64(0)
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
} else {
|
||||
previousNode.balance -= s.previousProposerBoostScore
|
||||
}
|
||||
}
|
||||
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
} else {
|
||||
proposerScore = (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
|
||||
currentNode.balance += proposerScore
|
||||
}
|
||||
s.removePreviousProposerBoost()
|
||||
proposerScore = 0
|
||||
if s.shouldApplyProposerBoost() {
|
||||
proposerScore = s.applyNewProposerBoost()
|
||||
}
|
||||
s.previousProposerBoostRoot = s.proposerBoostRoot
|
||||
s.previousProposerBoostScore = proposerScore
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) removePreviousProposerBoost() {
|
||||
if s.previousProposerBoostRoot == params.BeaconConfig().ZeroHash {
|
||||
return
|
||||
}
|
||||
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
return
|
||||
}
|
||||
n := previousNode.node
|
||||
if n.balance < s.previousProposerBoostScore {
|
||||
log.Errorf("invalid proposer boost score %d for node balance %d", s.previousProposerBoostScore, n.balance)
|
||||
n.balance = 0
|
||||
return
|
||||
}
|
||||
n.balance -= s.previousProposerBoostScore
|
||||
}
|
||||
|
||||
// applyNewProposerBoost applies the new proposer boost and returns the new proposer boost score.
|
||||
func (s *Store) applyNewProposerBoost() uint64 {
|
||||
if s.proposerBoostRoot == params.BeaconConfig().ZeroHash {
|
||||
return 0
|
||||
}
|
||||
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
return 0
|
||||
}
|
||||
proposerScore := (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
|
||||
currentNode.node.balance += proposerScore
|
||||
return proposerScore
|
||||
}
|
||||
|
||||
// ProposerBoost of fork choice store.
|
||||
func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
|
||||
return s.proposerBoostRoot
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -107,14 +108,16 @@ func (s *Store) insert(ctx context.Context,
|
||||
}
|
||||
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
parent: parent,
|
||||
justifiedEpoch: justifiedEpoch,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
blockHash: *blockHash,
|
||||
slot: slot,
|
||||
root: root,
|
||||
parent: parent,
|
||||
justifiedEpoch: justifiedEpoch,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
blockHash: *blockHash,
|
||||
payloadAvailabilityVote: bitfield.NewBitvector512(),
|
||||
payloadDataAvailabilityVote: bitfield.NewBitvector512(),
|
||||
}
|
||||
// Set the node's target checkpoint
|
||||
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -49,18 +50,20 @@ type Store struct {
|
||||
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
|
||||
// This is used as an array based stateful DAG for efficient fork choice look up.
|
||||
type Node struct {
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
blockHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *PayloadNode // parent index of this node.
|
||||
target *Node // target checkpoint for
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
blockHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *PayloadNode // parent index of this node.
|
||||
target *Node // target checkpoint for
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
payloadAvailabilityVote bitfield.Bitvector512 // PTC payload availability votes
|
||||
payloadDataAvailabilityVote bitfield.Bitvector512 // PTC payload data availability votes
|
||||
}
|
||||
|
||||
// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty of full
|
||||
|
||||
@@ -71,6 +71,7 @@ type Getter interface {
|
||||
type FastGetter interface {
|
||||
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
FinalizedPayloadBlockHash() [32]byte
|
||||
HasFullNode([32]byte) bool
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
@@ -90,8 +91,11 @@ type FastGetter interface {
|
||||
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
|
||||
UnrealizedJustifiedPayloadBlockHash() [32]byte
|
||||
Weight(root [32]byte) (uint64, error)
|
||||
ConsensusNodeWeight(root [32]byte) (uint64, error)
|
||||
PayloadWeights(root [32]byte) (emptyWeight, fullWeight uint64, err error)
|
||||
ParentRoot(root [32]byte) ([32]byte, error)
|
||||
BlockHash(root [32]byte) ([32]byte, error)
|
||||
CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool)
|
||||
}
|
||||
|
||||
// Setter allows to set forkchoice information
|
||||
@@ -105,4 +109,5 @@ type Setter interface {
|
||||
NewSlot(context.Context, primitives.Slot) error
|
||||
SetBalancesByRooter(BalancesByRooter)
|
||||
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
|
||||
SetPTCVote(root [32]byte, ptcIdx uint64, payloadPresent, blobDataAvailable bool)
|
||||
}
|
||||
|
||||
@@ -30,6 +30,13 @@ func NewROForkChoice(w ROWrappable) *ROForkChoice {
|
||||
return &ROForkChoice{getter: w, l: w}
|
||||
}
|
||||
|
||||
// HasFullNode delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HasFullNode(root [32]byte) bool {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HasFullNode(root)
|
||||
}
|
||||
|
||||
// HasNode delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HasNode(root [32]byte) bool {
|
||||
ro.l.RLock()
|
||||
@@ -135,6 +142,20 @@ func (ro *ROForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
return ro.getter.Weight(root)
|
||||
}
|
||||
|
||||
// ConsensusNodeWeight delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) ConsensusNodeWeight(root [32]byte) (uint64, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.ConsensusNodeWeight(root)
|
||||
}
|
||||
|
||||
// PayloadWeights delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) PayloadWeights(root [32]byte) (uint64, uint64, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.PayloadWeights(root)
|
||||
}
|
||||
|
||||
// IsOptimistic delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
ro.l.RLock()
|
||||
@@ -190,3 +211,10 @@ func (ro *ROForkChoice) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.BlockHash(root)
|
||||
}
|
||||
|
||||
// CanonicalNodeAtSlot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.CanonicalNodeAtSlot(slot)
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ const (
|
||||
unlockCalled
|
||||
rlockCalled
|
||||
runlockCalled
|
||||
hasFullNodeCalled
|
||||
hasNodeCalled
|
||||
proposerBoostCalled
|
||||
isCanonicalCalled
|
||||
@@ -32,6 +33,7 @@ const (
|
||||
highestReceivedBlockRootCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
consensusNodeWeightCalled
|
||||
isOptimisticCalled
|
||||
shouldOverrideFCUCalled
|
||||
slotCalled
|
||||
@@ -41,6 +43,8 @@ const (
|
||||
blockHashCalled
|
||||
dependentRootCalled
|
||||
dependentRootForEpochCalled
|
||||
canonicalNodeAtSlotCalled
|
||||
payloadWeightsCalled
|
||||
)
|
||||
|
||||
func _discard(t *testing.T, e error) {
|
||||
@@ -58,6 +62,11 @@ func TestROLocking(t *testing.T) {
|
||||
call mockCall
|
||||
cb func(FastGetter)
|
||||
}{
|
||||
{
|
||||
name: "hasFullNodeCalled",
|
||||
call: hasFullNodeCalled,
|
||||
cb: func(g FastGetter) { g.HasFullNode([32]byte{}) },
|
||||
},
|
||||
{
|
||||
name: "hasNodeCalled",
|
||||
call: hasNodeCalled,
|
||||
@@ -128,6 +137,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: weightCalled,
|
||||
cb: func(g FastGetter) { _, err := g.Weight([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "consensusNodeWeightCalled",
|
||||
call: consensusNodeWeightCalled,
|
||||
cb: func(g FastGetter) { _, err := g.ConsensusNodeWeight([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "isOptimisticCalled",
|
||||
call: isOptimisticCalled,
|
||||
@@ -153,6 +167,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: dependentRootCalled,
|
||||
cb: func(g FastGetter) { _, err := g.DependentRoot(0); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "canonicalNodeAtSlotCalled",
|
||||
call: canonicalNodeAtSlotCalled,
|
||||
cb: func(g FastGetter) { g.CanonicalNodeAtSlot(0) },
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
@@ -190,6 +209,11 @@ func (ro *mockROForkchoice) RUnlock() {
|
||||
ro.calls = append(ro.calls, runlockCalled)
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HasFullNode(_ [32]byte) bool {
|
||||
ro.calls = append(ro.calls, hasFullNodeCalled)
|
||||
return false
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HasNode(_ [32]byte) bool {
|
||||
ro.calls = append(ro.calls, hasNodeCalled)
|
||||
return false
|
||||
@@ -265,6 +289,16 @@ func (ro *mockROForkchoice) Weight(_ [32]byte) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ConsensusNodeWeight(_ [32]byte) (uint64, error) {
|
||||
ro.calls = append(ro.calls, consensusNodeWeightCalled)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) PayloadWeights(_ [32]byte) (uint64, uint64, error) {
|
||||
ro.calls = append(ro.calls, payloadWeightsCalled)
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) IsOptimistic(_ [32]byte) (bool, error) {
|
||||
ro.calls = append(ro.calls, isOptimisticCalled)
|
||||
return false, nil
|
||||
@@ -307,3 +341,8 @@ func (ro *mockROForkchoice) BlockHash(_ [32]byte) ([32]byte, error) {
|
||||
ro.calls = append(ro.calls, blockHashCalled)
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) CanonicalNodeAtSlot(_ primitives.Slot) ([32]byte, bool) {
|
||||
ro.calls = append(ro.calls, canonicalNodeAtSlotCalled)
|
||||
return [32]byte{}, false
|
||||
}
|
||||
|
||||
@@ -954,60 +954,61 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
|
||||
p2pService := b.fetchP2P()
|
||||
rpcService := rpc.NewService(b.ctx, &rpc.Config{
|
||||
ExecutionEngineCaller: web3Service,
|
||||
ExecutionReconstructor: web3Service,
|
||||
Host: host,
|
||||
Port: port,
|
||||
BeaconMonitoringHost: beaconMonitoringHost,
|
||||
BeaconMonitoringPort: beaconMonitoringPort,
|
||||
CertFlag: cert,
|
||||
KeyFlag: key,
|
||||
BeaconDB: b.db,
|
||||
Broadcaster: p2pService,
|
||||
PeersFetcher: p2pService,
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
ForkchoiceFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
AttestationCache: b.attestationCache,
|
||||
AttestationsPool: b.attestationPool,
|
||||
ExitPool: b.exitPool,
|
||||
SlashingsPool: b.slashingsPool,
|
||||
BLSChangesPool: b.blsToExecPool,
|
||||
SyncCommitteeObjectPool: b.syncCommitteePool,
|
||||
ExecutionChainService: web3Service,
|
||||
ExecutionChainInfoFetcher: web3Service,
|
||||
ChainStartFetcher: chainStartFetcher,
|
||||
MockEth1Votes: mockEth1DataVotes,
|
||||
SyncService: syncService,
|
||||
DepositFetcher: depositFetcher,
|
||||
PendingDepositFetcher: b.depositCache,
|
||||
BlockNotifier: b,
|
||||
StateNotifier: b,
|
||||
OperationNotifier: b,
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
Router: router,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
GraffitiInfo: web3Service.GraffitiInfo(),
|
||||
ExecutionEngineCaller: web3Service,
|
||||
ExecutionReconstructor: web3Service,
|
||||
Host: host,
|
||||
Port: port,
|
||||
BeaconMonitoringHost: beaconMonitoringHost,
|
||||
BeaconMonitoringPort: beaconMonitoringPort,
|
||||
CertFlag: cert,
|
||||
KeyFlag: key,
|
||||
BeaconDB: b.db,
|
||||
Broadcaster: p2pService,
|
||||
PeersFetcher: p2pService,
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
ForkchoiceFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
ExecutionPayloadEnvelopeReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
AttestationCache: b.attestationCache,
|
||||
AttestationsPool: b.attestationPool,
|
||||
ExitPool: b.exitPool,
|
||||
SlashingsPool: b.slashingsPool,
|
||||
BLSChangesPool: b.blsToExecPool,
|
||||
SyncCommitteeObjectPool: b.syncCommitteePool,
|
||||
ExecutionChainService: web3Service,
|
||||
ExecutionChainInfoFetcher: web3Service,
|
||||
ChainStartFetcher: chainStartFetcher,
|
||||
MockEth1Votes: mockEth1DataVotes,
|
||||
SyncService: syncService,
|
||||
DepositFetcher: depositFetcher,
|
||||
PendingDepositFetcher: b.depositCache,
|
||||
BlockNotifier: b,
|
||||
StateNotifier: b,
|
||||
OperationNotifier: b,
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
Router: router,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
GraffitiInfo: web3Service.GraffitiInfo(),
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
|
||||
@@ -145,6 +145,12 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultLightClientOptimisticUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
|
||||
return defaultLightClientFinalityUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipPayloadAttestationMessageMessage):
|
||||
// TODO: Revisit scoring params for payload attestation gossip.
|
||||
return defaultBlockTopicParams(), nil
|
||||
case strings.Contains(topic, GossipExecutionPayloadEnvelopeMessage):
|
||||
// TODO: Revisit scoring params for execution payload envelope gossip.
|
||||
return defaultBlockTopicParams(), nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
|
||||
}
|
||||
|
||||
@@ -37,6 +37,9 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
||||
if epoch >= params.BeaconConfig().FuluForkEpoch {
|
||||
return ðpb.SignedBeaconBlockFulu{}
|
||||
}
|
||||
if epoch >= params.BeaconConfig().GloasForkEpoch {
|
||||
return ðpb.SignedBeaconBlockGloas{}
|
||||
}
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return ðpb.SignedBeaconBlockElectra{}
|
||||
}
|
||||
@@ -146,6 +149,8 @@ func init() {
|
||||
|
||||
// Specially handle Fulu objects.
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockFulu]()] = BlockSubnetTopicFormat
|
||||
// Specially handle Gloas objects.
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockGloas]()] = BlockSubnetTopicFormat
|
||||
|
||||
// Payload attestation messages.
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()] = PayloadAttestationMessageTopicFormat
|
||||
|
||||
@@ -31,6 +31,7 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
capellaForkEpoch := primitives.Epoch(300)
|
||||
denebForkEpoch := primitives.Epoch(400)
|
||||
electraForkEpoch := primitives.Epoch(500)
|
||||
gloasForkEpoch := primitives.Epoch(550)
|
||||
fuluForkEpoch := primitives.Epoch(600)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
@@ -38,12 +39,14 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
bCfg.CapellaForkEpoch = capellaForkEpoch
|
||||
bCfg.DenebForkEpoch = denebForkEpoch
|
||||
bCfg.ElectraForkEpoch = electraForkEpoch
|
||||
bCfg.GloasForkEpoch = gloasForkEpoch
|
||||
bCfg.FuluForkEpoch = fuluForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.DenebForkVersion)] = primitives.Epoch(400)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.ElectraForkVersion)] = primitives.Epoch(500)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.GloasForkVersion)] = primitives.Epoch(550)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.FuluForkVersion)] = primitives.Epoch(600)
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
@@ -160,4 +163,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Gloas Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, gloasForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockGloas)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -67,6 +67,12 @@ const (
|
||||
|
||||
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
|
||||
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
|
||||
|
||||
// ExecutionPayloadEnvelopesByRootName is the name for the ExecutionPayloadEnvelopesByRoot v1 message topic.
|
||||
ExecutionPayloadEnvelopesByRootName = "/execution_payload_envelopes_by_root"
|
||||
|
||||
// ExecutionPayloadEnvelopesByRangeName is the name for the ExecutionPayloadEnvelopesByRange v1 message topic.
|
||||
ExecutionPayloadEnvelopesByRangeName = "/execution_payload_envelopes_by_range"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -106,6 +112,13 @@ const (
|
||||
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
// RPCExecutionPayloadEnvelopesByRootTopicV1 is a topic for requesting execution payload envelopes by their beacon block root.
|
||||
// /eth2/beacon_chain/req/execution_payload_envelopes_by_root/1 - New in Gloas.
|
||||
RPCExecutionPayloadEnvelopesByRootTopicV1 = protocolPrefix + ExecutionPayloadEnvelopesByRootName + SchemaVersionV1
|
||||
// RPCExecutionPayloadEnvelopesByRangeTopicV1 is a topic for requesting execution payload envelopes
|
||||
// in the slot range [start_slot, start_slot + count). New in Gloas.
|
||||
// /eth2/beacon_chain/req/execution_payload_envelopes_by_range/1/
|
||||
RPCExecutionPayloadEnvelopesByRangeTopicV1 = protocolPrefix + ExecutionPayloadEnvelopesByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
|
||||
@@ -168,8 +181,14 @@ var (
|
||||
// DataColumnSidecarsByRange v1 Message
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
|
||||
// ExecutionPayloadEnvelopesByRange v1 Message
|
||||
RPCExecutionPayloadEnvelopesByRangeTopicV1: new(pb.ExecutionPayloadEnvelopesByRangeRequest),
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
|
||||
|
||||
// ExecutionPayloadEnvelopesByRoot v1 Message
|
||||
RPCExecutionPayloadEnvelopesByRootTopicV1: new(p2ptypes.ExecutionPayloadEnvelopesByRootReq),
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
@@ -179,20 +198,22 @@ var (
|
||||
|
||||
// Maps all the protocol message names for the different rpc topics.
|
||||
messageMapping = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
LightClientBootstrapName: true,
|
||||
LightClientUpdatesByRangeName: true,
|
||||
LightClientFinalityUpdateName: true,
|
||||
LightClientOptimisticUpdateName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
LightClientBootstrapName: true,
|
||||
LightClientUpdatesByRangeName: true,
|
||||
LightClientFinalityUpdateName: true,
|
||||
LightClientOptimisticUpdateName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
ExecutionPayloadEnvelopesByRootName: true,
|
||||
ExecutionPayloadEnvelopesByRangeName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to updated in altair.
|
||||
|
||||
@@ -49,7 +49,7 @@ const (
|
||||
// GossipPayloadAttestationMessageMessage is the name for the payload attestation message type.
|
||||
GossipPayloadAttestationMessageMessage = "payload_attestation_message"
|
||||
// GossipExecutionPayloadEnvelopeMessage is the name for the execution payload envelope message type.
|
||||
GossipExecutionPayloadEnvelopeMessage = "execution_payload_envelope"
|
||||
GossipExecutionPayloadEnvelopeMessage = "execution_payload"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
|
||||
@@ -80,6 +80,11 @@ func InitializeDataMaps() {
|
||||
ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}, ExecutionRequests: &enginev1.ExecutionRequests{}}}},
|
||||
)
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
return blocks.NewSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockGloas{Block: ðpb.BeaconBlockGloas{Body: ðpb.BeaconBlockBodyGloas{}}},
|
||||
)
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
return blocks.NewSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockFulu{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}, ExecutionRequests: &enginev1.ExecutionRequests{}}}},
|
||||
@@ -107,6 +112,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV2(&ethpb.MetaDataV2{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV2(&ethpb.MetaDataV2{}), nil
},
@@ -132,6 +140,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
return &ethpb.SingleAttestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (ethpb.Att, error) {
return &ethpb.SingleAttestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.Att, error) {
return &ethpb.SingleAttestation{}, nil
},
@@ -157,6 +168,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
},
@@ -182,6 +196,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.AttSlashing, error) {
return &ethpb.AttesterSlashingElectra{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (ethpb.AttSlashing, error) {
return &ethpb.AttesterSlashingElectra{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.AttSlashing, error) {
return &ethpb.AttesterSlashingElectra{}, nil
},
@@ -204,6 +221,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
@@ -226,6 +246,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},

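For context, a minimal lookup sketch of how the fork-version maps populated above are consumed. This helper is hypothetical (not part of the diff); it only uses names that appear above — BlockMap, bytesutil.ToBytes4, the fork versions from params.BeaconConfig() — plus an assumed errors import:

    // emptyBlockForFork resolves an empty signed block for a fork version
    // via the map populated in InitializeDataMaps. Illustration only.
    func emptyBlockForFork(forkVersion []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
        f, ok := BlockMap[bytesutil.ToBytes4(forkVersion)]
        if !ok {
            return nil, errors.New("unknown fork version")
        }
        return f()
    }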
@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -86,3 +87,29 @@ func TestInitializeDataMaps(t *testing.T) {
})
}
}

func TestInitializeDataMaps_Gloas(t *testing.T) {
params.SetupTestConfigCleanup(t)
InitializeDataMaps()

gloasVersion := bytesutil.ToBytes4(params.BeaconConfig().GloasForkVersion)
bFunc, ok := BlockMap[gloasVersion]
require.Equal(t, true, ok)

b, err := bFunc()
require.NoError(t, err)
assert.Equal(t, version.Gloas, b.Version())

mdFunc, ok := MetaDataMap[gloasVersion]
require.Equal(t, true, ok)
md, err := mdFunc()
require.NoError(t, err)
assert.NotNil(t, md.MetadataObjV2())

attFunc, ok := AttestationMap[gloasVersion]
require.Equal(t, true, ok)
att, err := attFunc()
require.NoError(t, err)
_, ok = att.(*ethpb.SingleAttestation)
assert.Equal(t, true, ok)
}

@@ -14,8 +14,9 @@ var (
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")

ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
ErrMaxPayloadEnvelopeReqExceeded = errors.New("requested more than MAX_REQUEST_PAYLOADS")

ErrResourceUnavailable = errors.New("resource requested unavailable")
)

@@ -210,6 +210,64 @@ func (s BlobSidecarsByRootReq) Len() int {
return len(s)
}

// ExecutionPayloadEnvelopesByRootReq section

// ExecutionPayloadEnvelopesByRootReq specifies the execution payload envelopes by roots request type.
type ExecutionPayloadEnvelopesByRootReq [][fieldparams.RootLength]byte

// MarshalSSZTo marshals the execution payload envelopes by roots request, appending the result to the provided byte slice.
func (r *ExecutionPayloadEnvelopesByRootReq) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalledObj, err := r.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalledObj...), nil
}

// MarshalSSZ marshals the execution payload envelopes by roots request type into the serialized object.
func (r *ExecutionPayloadEnvelopesByRootReq) MarshalSSZ() ([]byte, error) {
if len(*r) > int(params.BeaconConfig().MaxRequestPayloads) {
return nil, errors.Errorf("execution payload envelopes by roots request exceeds max size: %d > %d", len(*r), params.BeaconConfig().MaxRequestPayloads)
}
buf := make([]byte, 0, r.SizeSSZ())
for _, root := range *r {
buf = append(buf, root[:]...)
}
return buf, nil
}

// SizeSSZ returns the size of the serialized representation.
func (r *ExecutionPayloadEnvelopesByRootReq) SizeSSZ() int {
return len(*r) * fieldparams.RootLength
}

// UnmarshalSSZ unmarshals the provided bytes buffer into the
// execution payload envelopes by roots request object.
func (r *ExecutionPayloadEnvelopesByRootReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconConfig().MaxRequestPayloads * fieldparams.RootLength)
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
}
if bufLen%fieldparams.RootLength != 0 {
return ssz.ErrIncorrectByteSize
}
numOfRoots := bufLen / fieldparams.RootLength
roots := make([][fieldparams.RootLength]byte, 0, numOfRoots)
for i := range numOfRoots {
var rt [fieldparams.RootLength]byte
copy(rt[:], buf[i*fieldparams.RootLength:(i+1)*fieldparams.RootLength])
roots = append(roots, rt)
}
*r = roots
return nil
}

// Len returns the number of roots in the request.
func (r ExecutionPayloadEnvelopesByRootReq) Len() int {
return len(r)
}
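A minimal round-trip sketch of the type above (illustration only; assumes it runs in the same package, inside a function returning error). The wire format is simply the concatenated 32-byte roots, capped at MaxRequestPayloads entries:

    req := ExecutionPayloadEnvelopesByRootReq{{0x01}, {0x02}}
    buf, err := req.MarshalSSZ() // 2 roots * 32 bytes = 64 bytes
    if err != nil {
        return err
    }
    var got ExecutionPayloadEnvelopesByRootReq
    return got.UnmarshalSSZ(buf) // rejects misaligned or oversized buffers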

// ====================================
// DataColumnsByRootIdentifiers section
// ====================================

@@ -205,6 +205,41 @@ func hexDecodeOrDie(t *testing.T, str string) []byte {
return decoded
}

func TestExecutionPayloadEnvelopesByRootReq_RoundTrip(t *testing.T) {
roots := make([][32]byte, 10)
for i := range roots {
roots[i] = [32]byte{byte(i)}
}
req := ExecutionPayloadEnvelopesByRootReq(roots)

marshalled, err := req.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, len(roots)*fieldparams.RootLength, len(marshalled))

got := &ExecutionPayloadEnvelopesByRootReq{}
require.NoError(t, got.UnmarshalSSZ(marshalled))
assert.DeepEqual(t, roots, [][32]byte(*got))
}

func TestExecutionPayloadEnvelopesByRootReq_Limit(t *testing.T) {
roots := make([][32]byte, params.BeaconConfig().MaxRequestPayloads+1)
req := ExecutionPayloadEnvelopesByRootReq(roots)

_, err := req.MarshalSSZ()
require.ErrorContains(t, "exceeds max size", err)

buf := make([]byte, (params.BeaconConfig().MaxRequestPayloads+1)*fieldparams.RootLength)
got := &ExecutionPayloadEnvelopesByRootReq{}
require.ErrorContains(t, "expected buffer with length of up to", got.UnmarshalSSZ(buf))
}

func TestExecutionPayloadEnvelopesByRootReq_UnmarshalBadSize(t *testing.T) {
// Buffer not a multiple of RootLength should fail.
buf := make([]byte, fieldparams.RootLength+1)
got := &ExecutionPayloadEnvelopesByRootReq{}
require.ErrorIs(t, got.UnmarshalSSZ(buf), ssz.ErrIncorrectByteSize)
}

// ====================================
// DataColumnsByRootIdentifiers section
// ====================================

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"beacon.go",
"duties.go",
"errors.go",
"log.go",
"service.go",
@@ -45,17 +46,23 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["validator_test.go"],
srcs = [
"duties_test.go",
"validator_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -63,6 +70,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],

266
beacon-chain/rpc/core/duties.go
Normal file
@@ -0,0 +1,266 @@
package core

import (
"bytes"
"context"
"sort"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/consensus-types/validator"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// AttesterDutyResult is a transport-agnostic representation of an attester duty.
type AttesterDutyResult struct {
Pubkey [fieldparams.BLSPubkeyLength]byte
ValidatorIndex primitives.ValidatorIndex
CommitteeIndex primitives.CommitteeIndex
CommitteeLength uint64
CommitteesAtSlot uint64
ValidatorCommitteeIndex uint64
Slot primitives.Slot
}

// ProposerDutyResult is a transport-agnostic representation of a proposer duty.
type ProposerDutyResult struct {
Pubkey [fieldparams.BLSPubkeyLength]byte
ValidatorIndex primitives.ValidatorIndex
Slot primitives.Slot
}

// SyncCommitteeDutyResult is a transport-agnostic representation of a sync committee duty.
type SyncCommitteeDutyResult struct {
Pubkey [fieldparams.BLSPubkeyLength]byte
ValidatorIndex primitives.ValidatorIndex
ValidatorSyncCommitteeIndices []uint64
}

// AttesterDuties computes attester duties for the requested validators at the given epoch.
// The caller is responsible for providing a state that is adequate for the requested epoch.
func (s *Service) AttesterDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch, indices []primitives.ValidatorIndex) ([]*AttesterDutyResult, *RpcError) {
ctx, span := trace.StartSpan(ctx, "coreService.AttesterDuties")
defer span.End()

assignments, err := helpers.CommitteeAssignments(ctx, st, epoch, indices)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not compute committee assignments"), Reason: Internal}
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get active validator count"), Reason: Internal}
}
committeesAtSlot := helpers.SlotCommitteeCount(activeValidatorCount)

duties := make([]*AttesterDutyResult, 0, len(indices))
for _, index := range indices {
pubkey := st.PubkeyAtIndex(index)
var zeroPubkey [fieldparams.BLSPubkeyLength]byte
if bytes.Equal(pubkey[:], zeroPubkey[:]) {
return nil, &RpcError{Err: errors.Errorf("Invalid validator index %d", index), Reason: BadRequest}
}
committee := assignments[index]
if committee == nil {
continue
}
duties = append(duties, &AttesterDutyResult{
Pubkey: pubkey,
ValidatorIndex: index,
CommitteeIndex: committee.CommitteeIndex,
CommitteeLength: uint64(len(committee.Committee)),
CommitteesAtSlot: committeesAtSlot,
ValidatorCommitteeIndex: findValidatorIndexInCommittee(committee.Committee, index),
Slot: committee.AttesterSlot,
})
}
return duties, nil
}

// ProposerDuties computes proposer duties for the given epoch.
// Results are sorted by slot.
func (s *Service) ProposerDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch) ([]*ProposerDutyResult, *RpcError) {
ctx, span := trace.StartSpan(ctx, "coreService.ProposerDuties")
defer span.End()

assignments, err := helpers.ProposerAssignments(ctx, st, epoch)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not compute proposer assignments"), Reason: Internal}
}

duties := make([]*ProposerDutyResult, 0, params.BeaconConfig().SlotsPerEpoch)
for index, proposalSlots := range assignments {
pubkey := st.PubkeyAtIndex(index)
for _, slot := range proposalSlots {
duties = append(duties, &ProposerDutyResult{
Pubkey: pubkey,
ValidatorIndex: index,
Slot: slot,
})
}
}

sort.Slice(duties, func(i, j int) bool {
return duties[i].Slot < duties[j].Slot
})

return duties, nil
}
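A minimal caller sketch for the two functions above (illustrative only; assumes a core *Service value svc, a context ctx, a state st appropriate for the epoch per the contracts above, and an fmt import):

    duties, rpcErr := svc.AttesterDuties(ctx, st, epoch, []primitives.ValidatorIndex{0, 1})
    if rpcErr != nil {
        return rpcErr.Err // REST callers translate rpcErr.Reason, e.g. via core.ErrorReasonToHTTP
    }
    for _, d := range duties {
        fmt.Printf("validator %d attests at slot %d (committee %d)\n", d.ValidatorIndex, d.Slot, d.CommitteeIndex)
    }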

// SyncCommitteeDuties computes sync committee duties for the requested validators.
// It also registers sync subnets for matched validators.
// The caller is responsible for providing a state that is adequate for the requested epoch.
func (s *Service) SyncCommitteeDuties(ctx context.Context, st state.BeaconState, requestedEpoch primitives.Epoch, currentEpoch primitives.Epoch, indices []primitives.ValidatorIndex) ([]*SyncCommitteeDutyResult, *RpcError) {
_, span := trace.StartSpan(ctx, "coreService.SyncCommitteeDuties")
defer span.End()

// Determine which sync committee to use based on the requested epoch.
startingEpoch := min(requestedEpoch, currentEpoch)
currentSyncCommitteeFirstEpoch, err := slots.SyncCommitteePeriodStartEpoch(startingEpoch)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee period start epoch"), Reason: Internal}
}
nextSyncCommitteeFirstEpoch := currentSyncCommitteeFirstEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
isCurrentCommittee := requestedEpoch < nextSyncCommitteeFirstEpoch

var committee [][]byte
if isCurrentCommittee {
sc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee"), Reason: Internal}
}
committee = sc.Pubkeys
} else {
sc, err := st.NextSyncCommittee()
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee"), Reason: Internal}
}
committee = sc.Pubkeys
}

// Build pubkey → positions map from committee pubkeys.
committeePubkeys := make(map[[fieldparams.BLSPubkeyLength]byte][]uint64)
for j, pk := range committee {
var pk48 [fieldparams.BLSPubkeyLength]byte
copy(pk48[:], pk)
committeePubkeys[pk48] = append(committeePubkeys[pk48], uint64(j))
}

duties := make([]*SyncCommitteeDutyResult, 0)
for _, index := range indices {
pubkey := st.PubkeyAtIndex(index)
var zeroPubkey [fieldparams.BLSPubkeyLength]byte
if bytes.Equal(pubkey[:], zeroPubkey[:]) {
return nil, &RpcError{Err: errors.Errorf("Invalid validator index %d", index), Reason: BadRequest}
}
positions, ok := committeePubkeys[pubkey]
if !ok {
continue
}
duties = append(duties, &SyncCommitteeDutyResult{
Pubkey: pubkey,
ValidatorIndex: index,
ValidatorSyncCommitteeIndices: positions,
})

// Register sync subnets for matched validators.
if isCurrentCommittee {
if err := RegisterSyncSubnetCurrentPeriod(st, requestedEpoch, pubkey[:], syncDutyStatus(st, index)); err != nil {
return nil, &RpcError{Err: errors.Wrapf(err, "could not register sync subnet for validator %d", index), Reason: Internal}
}
} else {
if err := RegisterSyncSubnetNextPeriod(st, requestedEpoch, pubkey[:], syncDutyStatus(st, index)); err != nil {
return nil, &RpcError{Err: errors.Wrapf(err, "could not register sync subnet for validator %d", index), Reason: Internal}
}
}
}

return duties, nil
}

// SyncCommitteeDutiesLastValidEpoch returns the last epoch for which sync committee duties can be computed.
func SyncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
return (currentSyncPeriodIndex+2)*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
}
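Worked example for the bound above (assuming the mainnet EPOCHS_PER_SYNC_COMMITTEE_PERIOD of 256): at currentEpoch = 300 the current period index is 300 / 256 = 1, so duties can be served through epoch (1 + 2) * 256 - 1 = 767, the end of the next sync committee period. For epoch 0 the bound is 2 * 256 - 1 = 511, which is the case exercised in the test file below.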

// findValidatorIndexInCommittee finds the position of a validator in a committee.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
for i, vIdx := range committee {
if vIdx == validatorIndex {
return uint64(i)
}
}
return 0
}

// syncDutyStatus returns a validator.Status suitable for sync subnet registration.
// It returns Active for any active validator and Pending otherwise.
func syncDutyStatus(st state.BeaconState, idx primitives.ValidatorIndex) validator.Status {
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil || val.IsNil() {
return validator.Pending
}
currentEpoch := coreTime.CurrentEpoch(st)
if val.ActivationEpoch() <= currentEpoch && currentEpoch < val.ExitEpoch() {
return validator.Active
}
return validator.Pending
}

// AttestationDependentRoot returns the block root at (epoch-1 start - 1),
// which is the dependent root for attester duties at the given epoch.
// Callers must handle epoch <= 1 separately (e.g. using the genesis block root from the DB).
func AttestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
if epoch <= 1 {
return nil, errors.New("epoch <= 1 requires genesis block root from DB")
}
prevEpochStartSlot, err := slots.EpochStart(epoch.Sub(1))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
}
root, err := helpers.BlockRootAtSlot(s, prevEpochStartSlot.Sub(1))
if err != nil {
return nil, errors.Wrap(err, "could not get block root")
}
return root, nil
}

// ProposalDependentRoot returns the block root at (epoch start - 1),
// which is the dependent root for proposer duties at the given epoch.
// This is the pre-Fulu (v1) calculation used by the REST /eth/v1 endpoint.
// Callers must handle epoch 0 separately (e.g. using the genesis block root from the DB).
func ProposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
if epoch == 0 {
return nil, errors.New("epoch 0 requires genesis block root from DB")
}
epochStartSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
}
root, err := helpers.BlockRootAtSlot(s, epochStartSlot.Sub(1))
if err != nil {
return nil, errors.Wrap(err, "could not get block root")
}
return root, nil
}

// ProposalDependentRootV2 returns the dependent root for proposer duties.
func ProposalDependentRootV2(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
if s.Version() >= version.Fulu {
// Post-Fulu (EIP-7917) the proposer schedule is deterministic from the
// previous epoch's state, so the dependent root is (prev_epoch_start - 1),
// matching AttestationDependentRoot. Pre-Fulu it falls back to (epoch_start - 1).
// See https://github.com/ethereum/beacon-APIs/pull/563.
return AttestationDependentRoot(s, epoch)
}
return ProposalDependentRoot(s, epoch)
}
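Worked example for the three helpers above (assuming 32 slots per epoch, as on mainnet): for epoch = 10, AttestationDependentRoot reads the root at slot 9*32 - 1 = 287, ProposalDependentRoot reads slot 10*32 - 1 = 319, and ProposalDependentRootV2 selects 287 on Fulu-and-later states and 319 before Fulu. The TestProposalDependentRootV2 cases below exercise the same arithmetic with 8-slot epochs.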
220
beacon-chain/rpc/core/duties_test.go
Normal file
@@ -0,0 +1,220 @@
package core

import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestAttesterDuties(t *testing.T) {
helpers.ClearCache()

depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err)

s := &Service{}

t.Run("single validator", func(t *testing.T) {
duties, rpcErr := s.AttesterDuties(t.Context(), bs, 0, []primitives.ValidatorIndex{0})
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 1, len(duties))
duty := duties[0]
assert.Equal(t, primitives.ValidatorIndex(0), duty.ValidatorIndex)
assert.NotEqual(t, uint64(0), duty.CommitteeLength)
assert.NotEqual(t, uint64(0), duty.CommitteesAtSlot)
})

t.Run("multiple validators", func(t *testing.T) {
indices := []primitives.ValidatorIndex{0, 1, 2}
duties, rpcErr := s.AttesterDuties(t.Context(), bs, 0, indices)
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 3, len(duties))
})

t.Run("zero pubkey returns error", func(t *testing.T) {
// Index far beyond the validator count should have a zero pubkey.
badIndex := primitives.ValidatorIndex(depChainStart + 100)
_, rpcErr := s.AttesterDuties(t.Context(), bs, 0, []primitives.ValidatorIndex{badIndex})
require.NotNil(t, rpcErr)
require.Equal(t, ErrorReason(BadRequest), rpcErr.Reason)
})
}

func TestProposerDuties(t *testing.T) {
helpers.ClearCache()

depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err)

s := &Service{}

t.Run("basic OK", func(t *testing.T) {
duties, rpcErr := s.ProposerDuties(t.Context(), bs, 0)
require.Equal(t, (*RpcError)(nil), rpcErr)
// Epoch 0 has SlotsPerEpoch slots, but slot 0 is skipped for proposer duties, so expect SlotsPerEpoch-1 duties.
require.Equal(t, int(params.BeaconConfig().SlotsPerEpoch-1), len(duties))
})

t.Run("sorted by slot", func(t *testing.T) {
duties, rpcErr := s.ProposerDuties(t.Context(), bs, 0)
require.Equal(t, (*RpcError)(nil), rpcErr)
for i := 1; i < len(duties); i++ {
assert.Equal(t, true, duties[i-1].Slot <= duties[i].Slot, "duties should be sorted by slot")
}
})
}

func TestSyncCommitteeDuties(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 0
params.OverrideBeaconConfig(cfg)

numVals := uint64(11)
st, _ := util.DeterministicGenesisStateAltair(t, numVals)
vals := st.Validators()

currCommittee := &ethpb.SyncCommittee{AggregatePubkey: make([]byte, 48)}
for i := range 5 {
currCommittee.Pubkeys = append(currCommittee.Pubkeys, vals[i].PublicKey)
}
// Add one pubkey twice to test duplicate positions.
currCommittee.Pubkeys = append(currCommittee.Pubkeys, vals[0].PublicKey)
require.NoError(t, st.SetCurrentSyncCommittee(currCommittee))

nextCommittee := &ethpb.SyncCommittee{AggregatePubkey: make([]byte, 48)}
for i := 5; i < 10; i++ {
nextCommittee.Pubkeys = append(nextCommittee.Pubkeys, vals[i].PublicKey)
}
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))

s := &Service{}

t.Run("current committee", func(t *testing.T) {
duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{1})
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 1, len(duties))
assert.Equal(t, primitives.ValidatorIndex(1), duties[0].ValidatorIndex)
require.Equal(t, 1, len(duties[0].ValidatorSyncCommitteeIndices))
assert.Equal(t, uint64(1), duties[0].ValidatorSyncCommitteeIndices[0])
})

t.Run("validator with duplicate positions", func(t *testing.T) {
duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{0})
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 1, len(duties))
// Validator 0 appears at indices 0 and 5.
require.Equal(t, 2, len(duties[0].ValidatorSyncCommitteeIndices))
})

t.Run("next committee", func(t *testing.T) {
nextEpoch := params.BeaconConfig().EpochsPerSyncCommitteePeriod
duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, nextEpoch, 0, []primitives.ValidatorIndex{5})
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 1, len(duties))
assert.Equal(t, primitives.ValidatorIndex(5), duties[0].ValidatorIndex)
})

t.Run("validator not in committee", func(t *testing.T) {
// Validator 10 is not in either committee.
duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{10})
require.Equal(t, (*RpcError)(nil), rpcErr)
require.Equal(t, 0, len(duties))
})

t.Run("zero pubkey returns error", func(t *testing.T) {
badIndex := primitives.ValidatorIndex(numVals + 100)
_, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{badIndex})
require.NotNil(t, rpcErr)
require.Equal(t, ErrorReason(BadRequest), rpcErr.Reason)
})
}

func TestSyncCommitteeDutiesLastValidEpoch(t *testing.T) {
t.Run("epoch 0", func(t *testing.T) {
result := SyncCommitteeDutiesLastValidEpoch(0)
expected := 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
assert.Equal(t, expected, result)
})
}

func TestProposalDependentRootV2(t *testing.T) {
helpers.ClearCache()

// With SlotsPerEpoch=8 and epoch=2:
// attestation dependent root slot = prev_epoch_start - 1 = 8 - 1 = 7
// v1 proposer dependent root slot = epoch_start - 1 = 16 - 1 = 15
// We set distinct roots at these slots so the test proves the fork
// branch selects the right one.
makeBlockRoots := func(t *testing.T) [][]byte {
shr := params.BeaconConfig().SlotsPerHistoricalRoot
roots := make([][]byte, shr)
for i := range roots {
roots[i] = make([]byte, 32)
roots[i][0] = byte(i)
}
return roots
}

t.Run("post-Fulu uses prev_epoch_start minus 1", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)

spe := params.BeaconConfig().SlotsPerEpoch
st, _ := util.DeterministicGenesisStateFulu(t, 64)
require.NoError(t, st.SetSlot(2*spe))
require.NoError(t, st.SetBlockRoots(makeBlockRoots(t)))

got, err := ProposalDependentRootV2(st, 2)
require.NoError(t, err)
// Post-Fulu: prev_epoch_start - 1 = SlotsPerEpoch - 1
assert.Equal(t, byte(spe-1), got[0])
})

t.Run("pre-Fulu uses epoch_start minus 1", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.ElectraForkEpoch = 0
cfg.FuluForkEpoch = 1000
params.OverrideBeaconConfig(cfg)

spe := params.BeaconConfig().SlotsPerEpoch
st, _ := util.DeterministicGenesisStateElectra(t, 64)
require.NoError(t, st.SetSlot(2*spe))
require.NoError(t, st.SetBlockRoots(makeBlockRoots(t)))

got, err := ProposalDependentRootV2(st, 2)
require.NoError(t, err)
// Pre-Fulu: epoch_start - 1 = 2*SlotsPerEpoch - 1
assert.Equal(t, byte(2*spe-1), got[0])
})
}

func TestFindValidatorIndexInCommittee(t *testing.T) {
committee := []primitives.ValidatorIndex{10, 20, 30}
assert.Equal(t, uint64(0), findValidatorIndexInCommittee(committee, 10))
assert.Equal(t, uint64(1), findValidatorIndexInCommittee(committee, 20))
assert.Equal(t, uint64(2), findValidatorIndexInCommittee(committee, 30))
// Not found returns 0.
assert.Equal(t, uint64(0), findValidatorIndexInCommittee(committee, 99))
}
@@ -891,6 +891,15 @@ func (s *Service) beaconEndpoints(
handler: server.GetProposerLookahead,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/execution_payload_envelope/{block_root}",
name: namespace + ".GetExecutionPayloadEnvelope",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetExecutionPayloadEnvelope,
methods: []string{http.MethodGet},
},
}
}

@@ -33,6 +33,7 @@ func Test_endpoints(t *testing.T) {
"/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/pending_consolidations": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/proposer_lookahead": {http.MethodGet},
"/eth/v1/beacon/execution_payload_envelope/{block_root}": {http.MethodGet},
"/eth/v1/beacon/headers": {http.MethodGet},
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"handlers_gloas.go",
"handlers_pool.go",
"handlers_state.go",
"handlers_validator.go",

74
beacon-chain/rpc/eth/beacon/handlers_gloas.go
Normal file
@@ -0,0 +1,74 @@
package beacon

import (
"net/http"

"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
)

// GetExecutionPayloadEnvelope retrieves a full execution payload envelope by beacon block root.
// The blinded envelope is fetched from the DB and the full execution payload is reconstructed
// from the EL via eth_getBlockByHash.
func (s *Server) GetExecutionPayloadEnvelope(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetExecutionPayloadEnvelope")
defer span.End()

rootBytes, err := bytesutil.DecodeHexWithLength(r.PathValue("block_root"), 32)
if err != nil {
httputil.HandleError(w, "Could not decode block root: "+err.Error(), http.StatusBadRequest)
return
}
root := [32]byte(rootBytes)
blinded, err := s.BeaconDB.ExecutionPayloadEnvelope(ctx, root)
if err != nil {
if errors.Is(err, db.ErrNotFound) {
httputil.HandleError(w, "Execution payload envelope not found", http.StatusNotFound)
return
}
httputil.HandleError(w, "Could not retrieve execution payload envelope: "+err.Error(), http.StatusInternalServerError)
return
}
full, err := s.ExecutionReconstructor.ReconstructExecutionPayloadEnvelope(ctx, blinded)
if err != nil {
httputil.HandleError(w, "Could not reconstruct execution payload envelope: "+err.Error(), http.StatusInternalServerError)
return
}

w.Header().Set(api.VersionHeader, version.String(version.Gloas))

if httputil.RespondWithSsz(r) {
sszBytes, err := full.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal envelope to SSZ: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteSsz(w, sszBytes)
return
}

isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return
}
finalized := s.FinalizationFetcher.IsFinalized(ctx, root)

jsonEnvelope, err := structs.SignedExecutionPayloadEnvelopeFromConsensus(full)
if err != nil {
httputil.HandleError(w, "Could not convert envelope to JSON: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteJson(w, &structs.GetExecutionPayloadEnvelopeResponse{
Version: version.String(version.Gloas),
ExecutionOptimistic: isOptimistic,
Finalized: finalized,
Data: jsonEnvelope,
})
}
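A minimal client sketch for the new endpoint (hypothetical host, port, and root hex string; SSZ is selected via the Accept header, matching the RespondWithSsz branch above):

    url := "http://localhost:3500/eth/v1/beacon/execution_payload_envelope/0x" + rootHex
    req, err := http.NewRequest(http.MethodGet, url, nil)
    if err != nil {
        return err
    }
    req.Header.Set("Accept", "application/octet-stream")
    resp, err := http.DefaultClient.Do(req)
    // On success the response carries the consensus version header set to "gloas";
    // omitting the Accept header yields the JSON GetExecutionPayloadEnvelopeResponse instead.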
@@ -85,6 +85,7 @@ func TestGetSpec(t *testing.T) {
config.FuluForkEpoch = 109
config.GloasForkVersion = []byte("GloasForkVersion")
config.GloasForkEpoch = 110
config.MaxBuildersPerWithdrawalsSweep = 112
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.BuilderWithdrawalPrefixByte = byte('e')
@@ -179,6 +180,7 @@
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
config.BuilderPaymentThresholdNumerator = 104
config.BuilderPaymentThresholdDenominator = 105
config.MaxRequestPayloads = 106

var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -222,7 +224,7 @@
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 193, len(data))
assert.Equal(t, 195, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -306,6 +308,8 @@
assert.Equal(t, "0x"+hex.EncodeToString([]byte("GloasForkVersion")), v)
case "GLOAS_FORK_EPOCH":
assert.Equal(t, "110", v)
case "MAX_BUILDERS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "112", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
@@ -596,6 +600,8 @@
assert.Equal(t, "4096", v)
case "MAX_BLOB_COMMITMENTS_PER_BLOCK":
assert.Equal(t, "94", v)
case "MAX_REQUEST_PAYLOADS":
assert.Equal(t, fmt.Sprintf("%d", config.MaxRequestPayloads), v)
case "MAX_BYTES_PER_TRANSACTION":
assert.Equal(t, "95", v)
case "MAX_EXTRA_DATA_BYTES":

@@ -113,6 +113,19 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id
httputil.HandleError(w, errMsgStateFromConsensus+": "+err.Error(), http.StatusInternalServerError)
return
}
case version.Gloas:
if strings.ToLower(string(id)) == "head" {
st, err = s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(s.HeadFetcher.HeadSlot()), 10)))
if err != nil {
shared.WriteStateFetchError(w, err)
return
}
}
respSt, err = structs.BeaconStateGloasFromConsensus(st)
if err != nil {
httputil.HandleError(w, errMsgStateFromConsensus+": "+err.Error(), http.StatusInternalServerError)
return
}
default:
httputil.HandleError(w, "Unsupported state version", http.StatusInternalServerError)
return

@@ -232,6 +232,35 @@ func TestGetBeaconStateV2(t *testing.T) {
assert.Equal(t, "123", st.Slot)
assert.Equal(t, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch), len(st.ProposerLookahead))
})
t.Run("Gloas", func(t *testing.T) {
fakeState, err := util.NewBeaconStateGloas()
require.NoError(t, err)
require.NoError(t, fakeState.SetSlot(123))
chainService := &blockchainmock.ChainService{}
s := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
request.SetPathValue("state_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetBeaconStateV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBeaconStateV2Response{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, version.String(version.Gloas), resp.Version)
st := &structs.BeaconStateGloas{}
require.NoError(t, json.Unmarshal(resp.Data, st))
assert.Equal(t, "123", st.Slot)
assert.Equal(t, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch), len(st.ProposerLookahead))
})
t.Run("execution optimistic", func(t *testing.T) {
parentRoot := [32]byte{'a'}
blk := util.NewBeaconBlock()
@@ -427,6 +456,78 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
})
t.Run("Electra", func(t *testing.T) {
fakeState, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, fakeState.SetSlot(123))

s := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
request.SetPathValue("state_id", "head")
request.Header.Set("Accept", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetBeaconStateV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
sszExpected, err := fakeState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
})
t.Run("Fulu", func(t *testing.T) {
fakeState, err := util.NewBeaconStateFulu()
require.NoError(t, err)
require.NoError(t, fakeState.SetSlot(123))

s := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
request.SetPathValue("state_id", "head")
request.Header.Set("Accept", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetBeaconStateV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
sszExpected, err := fakeState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
})
t.Run("Gloas", func(t *testing.T) {
fakeState, err := util.NewBeaconStateGloas()
require.NoError(t, err)
require.NoError(t, fakeState.SetSlot(123))

s := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
request.SetPathValue("state_id", "head")
request.Header.Set("Accept", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetBeaconStateV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, version.String(version.Gloas), writer.Header().Get(api.VersionHeader))
sszExpected, err := fakeState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
})
}

func TestGetForkChoiceHeadsV2(t *testing.T) {

@@ -74,6 +74,11 @@ const (
LightClientOptimisticUpdateTopic = "light_client_optimistic_update"
// DataColumnTopic represents a data column sidecar event topic
DataColumnTopic = "data_column_sidecar"
// ExecutionPayloadTopic represents a new execution payload envelope event topic
ExecutionPayloadTopic = "execution_payload_available"
// ExecutionPayloadBidTopic represents a new execution payload bid event topic.
// This topic is currently not triggered but is recognized to avoid client subscription errors.
ExecutionPayloadBidTopic = "execution_payload_bid"
)

var (
@@ -118,9 +123,14 @@ var stateFeedEventTopics = map[feed.EventType]string{
statefeed.Reorg: ChainReorgTopic,
statefeed.BlockProcessed: BlockTopic,
statefeed.PayloadAttributes: PayloadAttributesTopic,
statefeed.PayloadProcessed: ExecutionPayloadTopic,
}

var topicsForStateFeed = topicsForFeed(stateFeedEventTopics)
var topicsForStateFeed = func() map[string]bool {
m := topicsForFeed(stateFeedEventTopics)
m[ExecutionPayloadBidTopic] = true
return m
}()
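With this change, a subscription naming execution_payload_bid is accepted even though no state-feed event maps to it yet; a quick illustration of the resulting lookups (sketch only):

    _ = topicsForStateFeed[ExecutionPayloadTopic]    // true: mapped to statefeed.PayloadProcessed above
    _ = topicsForStateFeed[ExecutionPayloadBidTopic] // true: recognized, but no event is emitted for it yet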
var topicsForOpsFeed = topicsForFeed(opsFeedEventTopics)

func topicsForFeed(em map[feed.EventType]string) map[string]bool {
@@ -466,6 +476,8 @@ func topicForEvent(event *feed.Event) string {
return PayloadAttributesTopic
case *operation.DataColumnReceivedData:
return DataColumnTopic
case *statefeed.PayloadProcessedData:
return ExecutionPayloadTopic
default:
return InvalidTopic
}
@@ -638,6 +650,13 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
}
return jsonMarshalReader(eventName, blk)
}, nil
case *statefeed.PayloadProcessedData:
return func() io.Reader {
return jsonMarshalReader(eventName, &structs.PayloadEvent{
Slot: fmt.Sprintf("%d", v.Slot),
BlockRoot: hexutil.Encode(v.BlockRoot[:]),
})
}, nil
default:
return nil, errors.Wrapf(errUnhandledEventData, "event data type %T unsupported", v)
}

@@ -393,6 +393,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
FinalizedCheckpointTopic,
ChainReorgTopic,
BlockTopic,
ExecutionPayloadTopic,
})
require.NoError(t, err)
request := topics.testHttpRequest(testSync.ctx, t)
@@ -445,6 +446,13 @@
ExecutionOptimistic: false,
},
},
{
Type: statefeed.PayloadProcessed,
Data: &statefeed.PayloadProcessedData{
Slot: 10,
BlockRoot: [32]byte{0x9a},
},
},
}

go func() {

@@ -50,8 -50,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
],
)

@@ -9,7 +9,6 @@ import (
"io"
"net/http"
"slices"
"sort"
"strconv"
"time"

@@ -30,7 +29,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
validator2 "github.com/OffchainLabs/prysm/v7/consensus-types/validator"
mvslice "github.com/OffchainLabs/prysm/v7/container/multi-value-slice"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
ethpbalpha "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -40,8 +38,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// GetAggregateAttestationV2 aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
@@ -625,11 +621,13 @@ func (s *Server) GetAttestationData(w http.ResponseWriter, r *http.Request) {
return
}

isPostGloas := slots.ToEpoch(s.TimeFetcher.CurrentSlot()) >= params.BeaconConfig().GloasForkEpoch

_, slot, ok := shared.UintFromQuery(w, r, "slot", true)
if !ok {
return
}
_, committeeIndex, ok := shared.UintFromQuery(w, r, "committee_index", true)
_, committeeIndex, ok := shared.UintFromQuery(w, r, "committee_index", !isPostGloas)
if !ok {
return
}
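The net effect of the hunk above, illustrated with hypothetical requests against the standard attestation-data endpoint: before the Gloas fork epoch both query parameters remain required, e.g. GET /eth/v1/validator/attestation_data?slot=123&committee_index=4, while from the Gloas fork epoch onward committee_index may be omitted (GET /eth/v1/validator/attestation_data?slot=123), mirroring the !isPostGloas requirement flag passed to UintFromQuery.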
@@ -910,47 +908,22 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
return
}

assignments, err := helpers.CommitteeAssignments(ctx, st, requestedEpoch, requestedValIndices)
if err != nil {
httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
coreDuties, rpcErr := s.CoreService.AttesterDuties(ctx, st, requestedEpoch, requestedValIndices)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get active validator count: "+err.Error(), http.StatusInternalServerError)
return
}
committeesAtSlot := helpers.SlotCommitteeCount(activeValidatorCount)

duties := make([]*structs.AttesterDuty, 0, len(requestedValIndices))
for _, index := range requestedValIndices {
pubkey := st.PubkeyAtIndex(index)
var zeroPubkey [fieldparams.BLSPubkeyLength]byte
if bytes.Equal(pubkey[:], zeroPubkey[:]) {
httputil.HandleError(w, fmt.Sprintf("Invalid validator index %d", index), http.StatusBadRequest)
return
}
committee := assignments[index]
if committee == nil {
continue
}
var valIndexInCommittee int
// valIndexInCommittee will be 0 in case we don't get a match. This is a potential false positive,
// however it's an impossible condition because every validator must be assigned to a committee.
for cIndex, vIndex := range committee.Committee {
if vIndex == index {
valIndexInCommittee = cIndex
break
}
}
duties := make([]*structs.AttesterDuty, 0, len(coreDuties))
for _, d := range coreDuties {
duties = append(duties, &structs.AttesterDuty{
Pubkey: hexutil.Encode(pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(index), 10),
CommitteeIndex: strconv.FormatUint(uint64(committee.CommitteeIndex), 10),
CommitteeLength: strconv.Itoa(len(committee.Committee)),
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot, 10),
ValidatorCommitteeIndex: strconv.Itoa(valIndexInCommittee),
Slot: strconv.FormatUint(uint64(committee.AttesterSlot), 10),
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
CommitteeIndex: strconv.FormatUint(uint64(d.CommitteeIndex), 10),
CommitteeLength: strconv.FormatUint(d.CommitteeLength, 10),
CommitteesAtSlot: strconv.FormatUint(d.CommitteesAtSlot, 10),
ValidatorCommitteeIndex: strconv.FormatUint(d.ValidatorCommitteeIndex, 10),
Slot: strconv.FormatUint(uint64(d.Slot), 10),
})
}

@@ -963,7 +936,7 @@
}
dependentRoot = r[:]
} else {
dependentRoot, err = attestationDependentRoot(st, requestedEpoch)
dependentRoot, err = core.AttestationDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1021,33 +994,23 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
return
}

var assignments map[primitives.ValidatorIndex][]primitives.Slot
dutyEpoch := requestedEpoch
if nextEpochLookahead {
assignments, err = helpers.ProposerAssignments(ctx, st, nextEpoch)
} else {
assignments, err = helpers.ProposerAssignments(ctx, st, requestedEpoch)
dutyEpoch = nextEpoch
}
if err != nil {
httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
coreDuties, rpcErr := s.CoreService.ProposerDuties(ctx, st, dutyEpoch)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}

duties := make([]*structs.ProposerDuty, 0)
for index, proposalSlots := range assignments {
val, err := st.ValidatorAtIndexReadOnly(index)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not get validator at index %d: %v", index, err), http.StatusInternalServerError)
return
}
pubkey48 := val.PublicKey()
pubkey := pubkey48[:]
for _, slot := range proposalSlots {
duties = append(duties, &structs.ProposerDuty{
Pubkey: hexutil.Encode(pubkey),
ValidatorIndex: strconv.FormatUint(uint64(index), 10),
Slot: strconv.FormatUint(uint64(slot), 10),
})
}
duties := make([]*structs.ProposerDuty, 0, len(coreDuties))
for _, d := range coreDuties {
duties = append(duties, &structs.ProposerDuty{
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
Slot: strconv.FormatUint(uint64(d.Slot), 10),
})
}

var dependentRoot []byte
@@ -1059,7 +1022,7 @@
}
dependentRoot = r[:]
} else {
dependentRoot, err = proposalDependentRoot(st, requestedEpoch)
dependentRoot, err = core.ProposalDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1070,10 +1033,6 @@
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return
}
if err = sortProposerDuties(duties); err != nil {
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
return
}

resp := &structs.GetProposerDutiesResponse{
DependentRoot: hexutil.Encode(dependentRoot),
@@ -1135,7 +1094,7 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
}

currentEpoch := slots.ToEpoch(s.TimeFetcher.CurrentSlot())
lastValidEpoch := syncCommitteeDutiesLastValidEpoch(currentEpoch)
lastValidEpoch := core.SyncCommitteeDutiesLastValidEpoch(currentEpoch)
if requestedEpoch > lastValidEpoch {
httputil.HandleError(w, fmt.Sprintf("Epoch is too far in the future, maximum valid epoch is %d", lastValidEpoch), http.StatusBadRequest)
return
@@ -1149,55 +1108,23 @@
return
}

currentSyncCommitteeFirstEpoch, err := slots.SyncCommitteePeriodStartEpoch(startingEpoch)
if err != nil {
httputil.HandleError(w, "Could not get sync committee period start epoch: "+err.Error(), http.StatusInternalServerError)
return
}
nextSyncCommitteeFirstEpoch := currentSyncCommitteeFirstEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
isCurrentCommitteeRequested := requestedEpoch < nextSyncCommitteeFirstEpoch
var committee *ethpbalpha.SyncCommittee
if isCurrentCommitteeRequested {
committee, err = st.CurrentSyncCommittee()
if err != nil {
httputil.HandleError(w, "Could not get sync committee: "+err.Error(), http.StatusInternalServerError)
return
}
} else {
committee, err = st.NextSyncCommittee()
if err != nil {
httputil.HandleError(w, "Could not get sync committee: "+err.Error(), http.StatusInternalServerError)
return
}
}
committeePubkeys := make(map[[fieldparams.BLSPubkeyLength]byte][]string)
for j, pubkey := range committee.Pubkeys {
pubkey48 := bytesutil.ToBytes48(pubkey)
committeePubkeys[pubkey48] = append(committeePubkeys[pubkey48], strconv.FormatUint(uint64(j), 10))
}
duties, vals, err := syncCommitteeDutiesAndVals(st, requestedValIndices, committeePubkeys)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
coreDuties, rpcErr := s.CoreService.SyncCommitteeDuties(ctx, st, requestedEpoch, currentEpoch, requestedValIndices)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}

var registerSyncSubnet func(state.BeaconState, primitives.Epoch, []byte, validator2.Status) error
if isCurrentCommitteeRequested {
registerSyncSubnet = core.RegisterSyncSubnetCurrentPeriod
} else {
registerSyncSubnet = core.RegisterSyncSubnetNextPeriod
}
for _, v := range vals {
pk := v.PublicKey()
valStatus, err := rpchelpers.ValidatorStatus(v, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get validator status: "+err.Error(), http.StatusInternalServerError)
return
}
if err := registerSyncSubnet(st, requestedEpoch, pk[:], valStatus); err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not register sync subnet for pubkey %#x", pk), http.StatusInternalServerError)
return
duties := make([]*structs.SyncCommitteeDuty, 0, len(coreDuties))
for _, d := range coreDuties {
syncIndices := make([]string, len(d.ValidatorSyncCommitteeIndices))
for i, idx := range d.ValidatorSyncCommitteeIndices {
syncIndices[i] = strconv.FormatUint(idx, 10)
}
duties = append(duties, &structs.SyncCommitteeDuty{
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
ValidatorSyncCommitteeIndices: syncIndices,
})
}

isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
@@ -1388,7 +1315,7 @@ func (s *Server) GetPTCDuties(w http.ResponseWriter, r *http.Request) {
}
dependentRoot = r[:]
} else {
dependentRoot, err = attestationDependentRoot(st, requestedEpoch)
dependentRoot, err = core.AttestationDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1525,102 +1452,3 @@ func (s *Server) BeaconCommitteeSelections(w http.ResponseWriter, _ *http.Reques
func (s *Server) SyncCommitteeSelections(w http.ResponseWriter, _ *http.Request) {
httputil.HandleError(w, "Endpoint not implemented", 501)
}

// attestationDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)
// or the genesis block root in the case of underflow.
func attestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
var dependentRootSlot primitives.Slot
if epoch <= 1 {
dependentRootSlot = 0
} else {
prevEpochStartSlot, err := slots.EpochStart(epoch.Sub(1))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
}
dependentRootSlot = prevEpochStartSlot.Sub(1)
}
root, err := helpers.BlockRootAtSlot(s, dependentRootSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get block root")
}
return root, nil
}

// proposalDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch) - 1)
// or the genesis block root in the case of underflow.
func proposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
var dependentRootSlot primitives.Slot
if epoch == 0 {
dependentRootSlot = 0
} else {
epochStartSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
}
dependentRootSlot = epochStartSlot.Sub(1)
}
root, err := helpers.BlockRootAtSlot(s, dependentRootSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block root")
|
||||
}
|
||||
return root, nil
|
||||
}
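
// Illustrative arithmetic (an added example, assuming the mainnet value of
// 32 slots per epoch): for epoch 5 the attestation dependent root is the block
// root at compute_start_slot_at_epoch(4) - 1 = 127, while the proposal
// dependent root is the root at compute_start_slot_at_epoch(5) - 1 = 159.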

func syncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
    currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
    // Return the last epoch of the next sync committee.
    // To do this we go two periods ahead to find the first invalid epoch, and then subtract 1.
    return (currentSyncPeriodIndex+2)*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
}
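
// Worked example (added for illustration, using the mainnet value
// EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256): for currentEpoch = 300 the period
// index is 300 / 256 = 1, so the last valid epoch is (1+2)*256 - 1 = 767,
// i.e. the final epoch of the next sync committee period.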

// syncCommitteeDutiesAndVals takes a list of requested validator indices and the actual sync committee pubkeys.
// It returns duties for the validator indices that are part of the sync committee.
// Additionally, it returns read-only validator objects for these validator indices.
func syncCommitteeDutiesAndVals(
    st state.BeaconState,
    requestedValIndices []primitives.ValidatorIndex,
    committeePubkeys map[[fieldparams.BLSPubkeyLength]byte][]string,
) ([]*structs.SyncCommitteeDuty, []state.ReadOnlyValidator, error) {
    duties := make([]*structs.SyncCommitteeDuty, 0)
    vals := make([]state.ReadOnlyValidator, 0)
    for _, index := range requestedValIndices {
        duty := &structs.SyncCommitteeDuty{
            ValidatorIndex: strconv.FormatUint(uint64(index), 10),
        }
        valPubkey := st.PubkeyAtIndex(index)
        var zeroPubkey [fieldparams.BLSPubkeyLength]byte
        if bytes.Equal(valPubkey[:], zeroPubkey[:]) {
            return nil, nil, errors.Errorf("Invalid validator index %d", index)
        }
        duty.Pubkey = hexutil.Encode(valPubkey[:])
        indices, ok := committeePubkeys[valPubkey]
        if ok {
            duty.ValidatorSyncCommitteeIndices = indices
            duties = append(duties, duty)
            v, err := st.ValidatorAtIndexReadOnly(index)
            if err != nil {
                return nil, nil, fmt.Errorf("could not get validator at index %d", index)
            }
            vals = append(vals, v)
        }
    }
    return duties, vals, nil
}

func sortProposerDuties(duties []*structs.ProposerDuty) error {
    var err error
    sort.Slice(duties, func(i, j int) bool {
        si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
        if parseErr != nil {
            err = errors.Wrap(parseErr, "could not parse slot")
            return false
        }
        sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
        if parseErr != nil {
            err = errors.Wrap(parseErr, "could not parse slot")
            return false
        }
        return si < sj
    })
    return err
}
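
// Design note (added): sort.Slice's less function cannot return an error, so
// parse failures are captured in the enclosing err variable and reported once
// the sort completes; the resulting order is unspecified when that happens.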

@@ -1744,6 +1744,85 @@ func TestGetAttestationData(t *testing.T) {
        var att ethpbalpha.AttestationData
        require.NoError(t, att.UnmarshalSSZ(writer.Body.Bytes()))
    })

    t.Run("committeeIndex omitted after gloas", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        cfg := params.BeaconConfig()
        cfg.GloasForkEpoch = 3
        params.OverrideBeaconConfig(cfg)

        block := util.NewBeaconBlock()
        block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
        targetBlock := util.NewBeaconBlock()
        targetBlock.Block.Slot = 1 * params.BeaconConfig().SlotsPerEpoch
        justifiedBlock := util.NewBeaconBlock()
        justifiedBlock.Block.Slot = 2 * params.BeaconConfig().SlotsPerEpoch
        blockRoot, err := block.Block.HashTreeRoot()
        require.NoError(t, err, "Could not hash beacon block")
        justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
        require.NoError(t, err, "Could not get signing root for justified block")
        slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
        beaconState, err := util.NewBeaconState()
        require.NoError(t, err)
        require.NoError(t, beaconState.SetSlot(slot))
        justifiedCheckpoint := &ethpbalpha.Checkpoint{
            Epoch: 2,
            Root: justifiedRoot[:],
        }
        require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
        offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
        chain := &mockChain.ChainService{
            Optimistic: false,
            Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
            Root: blockRoot[:],
            CurrentJustifiedCheckPoint: justifiedCheckpoint,
            TargetRoot: blockRoot,
            State: beaconState,
        }

        s := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: false},
            HeadFetcher: chain,
            TimeFetcher: chain,
            OptimisticModeFetcher: chain,
            CoreService: &core.Service{
                HeadFetcher: chain,
                GenesisTimeFetcher: chain,
                FinalizedFetcher: chain,
                AttestationCache: cache.NewAttestationDataCache(),
                OptimisticModeFetcher: chain,
            },
        }

        url := fmt.Sprintf("http://example.com?slot=%d", slot)
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.GetAttestationData(writer, request)

        expectedResponse := &structs.GetAttestationDataResponse{
            Data: &structs.AttestationData{
                Slot: strconv.FormatUint(uint64(slot), 10),
                BeaconBlockRoot: hexutil.Encode(blockRoot[:]),
                CommitteeIndex: strconv.FormatUint(0, 10),
                Source: &structs.Checkpoint{
                    Epoch: strconv.FormatUint(2, 10),
                    Root: hexutil.Encode(justifiedRoot[:]),
                },
                Target: &structs.Checkpoint{
                    Epoch: strconv.FormatUint(3, 10),
                    Root: hexutil.Encode(blockRoot[:]),
                },
            },
        }

        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetAttestationDataResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.NotNil(t, resp)
        assert.DeepEqual(t, expectedResponse, resp)
    })
}
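
// Note (added): the expected response pins CommitteeIndex to "0", matching the
// test's premise that from the Gloas fork onward the endpoint no longer
// populates a per-attestation committee index.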

func TestProduceSyncCommitteeContribution(t *testing.T) {
@@ -2009,6 +2088,7 @@ func TestGetAttesterDuties(t *testing.T) {
        OptimisticModeFetcher: chain,
        HeadFetcher: chain,
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    t.Run("single validator", func(t *testing.T) {
@@ -2327,6 +2407,7 @@ func TestGetProposerDuties(t *testing.T) {
        PayloadIDCache: cache.NewPayloadIDCache(),
        TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2369,6 +2450,7 @@
        PayloadIDCache: cache.NewPayloadIDCache(),
        TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2412,6 +2494,7 @@
        PayloadIDCache: cache.NewPayloadIDCache(),
        TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    currentEpoch := slots.ToEpoch(bs.Slot())
@@ -2451,6 +2534,7 @@
        PayloadIDCache: cache.NewPayloadIDCache(),
        TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2577,6 +2661,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
        TimeFetcher: mockChainService,
        HeadFetcher: mockChainService,
        OptimisticModeFetcher: mockChainService,
        CoreService: &core.Service{},
    }

    t.Run("single validator", func(t *testing.T) {
@@ -2768,6 +2853,7 @@
        TimeFetcher: mockChainService,
        HeadFetcher: mockChainService,
        OptimisticModeFetcher: mockChainService,
        CoreService: &core.Service{},
    }

    var body bytes.Buffer
@@ -2863,6 +2949,7 @@
        OptimisticModeFetcher: mockChainService,
        ChainInfoFetcher: mockChainService,
        BeaconDB: db,
        CoreService: &core.Service{},
    }

    var body bytes.Buffer

@@ -72,10 +72,24 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
        }
    }

    meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
    if err != nil {
        return nil, err
    // Use core service for attester and proposer duties
    currentAttesterDuties, rpcErr := vs.CoreService.AttesterDuties(ctx, s, req.Epoch, requestIndices)
    if rpcErr != nil {
        return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
    }
    nextAttesterDuties, rpcErr := vs.CoreService.AttesterDuties(ctx, s, req.Epoch+1, requestIndices)
    if rpcErr != nil {
        return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
    }
    proposerDuties, rpcErr := vs.CoreService.ProposerDuties(ctx, s, req.Epoch)
    if rpcErr != nil {
        return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
    }

    // Build index maps for O(1) lookup
    currentAttesterMap := buildAttesterMap(currentAttesterDuties)
    nextAttesterMap := buildAttesterMap(nextAttesterDuties)
    proposerMap := buildProposerMap(proposerDuties)

    validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
    nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
@@ -97,13 +111,72 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
            continue
        }

        currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
        nextAssignment := vs.getValidatorAssignment(meta.next, info.index)
        statusEnum := assignmentStatus(s, info.index)

        assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
        if err != nil {
            return nil, err
        // Current epoch assignment
        assignment := &ethpb.DutiesV2Response_Duty{
            PublicKey: pubKey,
            ValidatorIndex: info.index,
            Status: statusEnum,
            ProposerSlots: proposerMap[info.index],
        }
        if ad, ok := currentAttesterMap[info.index]; ok {
            assignment.AttesterSlot = ad.Slot
            assignment.CommitteeIndex = ad.CommitteeIndex
            assignment.CommitteeLength = ad.CommitteeLength
            assignment.CommitteesAtSlot = ad.CommitteesAtSlot
            assignment.ValidatorCommitteeIndex = ad.ValidatorCommitteeIndex
        }

        // Next epoch assignment
        nextDuty := &ethpb.DutiesV2Response_Duty{
            PublicKey: pubKey,
            ValidatorIndex: info.index,
            Status: statusEnum,
        }
        if ad, ok := nextAttesterMap[info.index]; ok {
            nextDuty.AttesterSlot = ad.Slot
            nextDuty.CommitteeIndex = ad.CommitteeIndex
            nextDuty.CommitteeLength = ad.CommitteeLength
            nextDuty.CommitteesAtSlot = ad.CommitteesAtSlot
            nextDuty.ValidatorCommitteeIndex = ad.ValidatorCommitteeIndex
        }

        // Sync committee flags
        if coreTime.HigherEqualThanAltairVersionAndEpoch(s, req.Epoch) {
            inSync, err := helpers.IsCurrentPeriodSyncCommittee(s, info.index)
            if err != nil {
                return nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
            }
            assignment.IsSyncCommittee = inSync
            nextDuty.IsSyncCommittee = inSync
            if inSync {
                if err := core.RegisterSyncSubnetCurrentPeriodProto(s, req.Epoch, pubKey, statusEnum); err != nil {
                    return nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
                }
            }

            // Next epoch sync committee duty is assigned with the next period's sync committee only at the
            // sync period epoch boundary (i.e. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Otherwise the
            // next epoch sync committee duty is the same as the current epoch's.
            nextEpoch := req.Epoch.Add(1)
            stateEpoch := coreTime.CurrentEpoch(s)
            n := slots.SyncCommitteePeriod(nextEpoch)
            c := slots.SyncCommitteePeriod(stateEpoch)
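            // Illustrative boundary check (added, assuming the mainnet value
            // EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256): at req.Epoch = 255 the
            // next epoch 256 falls in period 1 while the state is still in
            // period 0, so n > c below and the next-period committee is consulted.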
            if n > c {
                nextInSync, err := helpers.IsNextPeriodSyncCommittee(s, info.index)
                if err != nil {
                    return nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
                }
                nextDuty.IsSyncCommittee = nextInSync
                if nextInSync {
                    if err := core.RegisterSyncSubnetNextPeriodProto(s, req.Epoch, pubKey, statusEnum); err != nil {
                        log.WithError(err).Warn("Could not register sync subnet next period")
                    }
                }
            }
        }

        validatorAssignments = append(validatorAssignments, assignment)
        nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
    }
@@ -150,155 +223,20 @@ func (vs *Server) stateForEpoch(ctx context.Context, s state.BeaconState, reqEpo
    return s, nil
}

// dutiesMetadata bundles together related data needed for duty
// construction.
type dutiesMetadata struct {
    current *metadata
    next *metadata
// buildAttesterMap creates a map from validator index to attester duty for O(1) lookup.
func buildAttesterMap(duties []*core.AttesterDutyResult) map[primitives.ValidatorIndex]*core.AttesterDutyResult {
    m := make(map[primitives.ValidatorIndex]*core.AttesterDutyResult, len(duties))
    for _, d := range duties {
        m[d.ValidatorIndex] = d
    }
    return m
}

type metadata struct {
    committeesAtSlot uint64
    proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
    committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
}

func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
    meta := &dutiesMetadata{}
    var err error
    meta.current, err = loadMetadata(ctx, s, reqEpoch, requestIndices)
    if err != nil {
        return nil, err
    }
    // note: we only set the proposer slots for the current assignment and not the next epoch assignment
    meta.current.proposalSlots, err = helpers.ProposerAssignments(ctx, s, reqEpoch)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
    }

    meta.next, err = loadMetadata(ctx, s, reqEpoch+1, requestIndices)
    if err != nil {
        return nil, err
    }
    return meta, nil
}

func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
    meta := &metadata{}

    if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
        return nil, err
    }

    activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, reqEpoch)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
    }
    meta.committeesAtSlot = helpers.SlotCommitteeCount(activeValidatorCount)

    // Use CommitteeAssignments which only computes committees for requested validators
    meta.committeeAssignments, err = helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
    }

    return meta, nil
}

// findValidatorIndexInCommittee finds the position of a validator in a committee.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
    for i, vIdx := range committee {
        if vIdx == validatorIndex {
            return uint64(i)
        }
    }
    return 0
}
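
// Note (added): a validator absent from the committee also yields 0, which is
// indistinguishable from position 0; callers are expected to confirm
// membership first, as getValidatorAssignment does via its map lookup.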

// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
    if assignment, exists := meta.committeeAssignments[validatorIndex]; exists {
        return &helpers.LiteAssignment{
            AttesterSlot: assignment.AttesterSlot,
            CommitteeIndex: assignment.CommitteeIndex,
            CommitteeLength: uint64(len(assignment.Committee)),
            ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
        }
    }
    return &helpers.LiteAssignment{}
}

// buildValidatorDuty builds both current-epoch and next-epoch V2 duty objects
// for a single validator index.
func (vs *Server) buildValidatorDuty(
    pubKey []byte,
    idx primitives.ValidatorIndex,
    s state.BeaconState,
    reqEpoch primitives.Epoch,
    meta *dutiesMetadata,
    currentAssignment *helpers.LiteAssignment,
    nextAssignment *helpers.LiteAssignment,
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
    assignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
    nextDuty := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}

    statusEnum := assignmentStatus(s, idx)
    assignment.ValidatorIndex = idx
    assignment.Status = statusEnum
    assignment.CommitteesAtSlot = meta.current.committeesAtSlot
    assignment.ProposerSlots = meta.current.proposalSlots[idx]
    populateCommitteeFields(assignment, currentAssignment)

    nextDuty.ValidatorIndex = idx
    nextDuty.Status = statusEnum
    nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
    populateCommitteeFields(nextDuty, nextAssignment)

    // Sync committee flags
    if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
        inSync, err := helpers.IsCurrentPeriodSyncCommittee(s, idx)
        if err != nil {
            return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
        }
        assignment.IsSyncCommittee = inSync
        nextDuty.IsSyncCommittee = inSync
        if inSync {
            if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
                return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
            }
        }

        // Next epoch sync committee duty is assigned with the next period's sync committee only at the
        // sync period epoch boundary (i.e. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Otherwise the
        // next epoch sync committee duty is the same as the current epoch's.
        nextEpoch := reqEpoch + 1
        currentEpoch := coreTime.CurrentEpoch(s)
        n := slots.SyncCommitteePeriod(nextEpoch)
        c := slots.SyncCommitteePeriod(currentEpoch)
        if n > c {
            nextInSync, err := helpers.IsNextPeriodSyncCommittee(s, idx)
            if err != nil {
                return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
            }
            nextDuty.IsSyncCommittee = nextInSync
            if nextInSync {
                if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
                    log.WithError(err).Warn("Could not register sync subnet next period")
                }
            }
        }
    }

    return assignment, nextDuty, nil
}

func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
    if duty == nil || la == nil {
        // should never be the case, as the calling functions always set these values
        return
    }
    duty.CommitteeLength = la.CommitteeLength
    duty.CommitteeIndex = la.CommitteeIndex
    duty.ValidatorCommitteeIndex = la.ValidatorCommitteeIndex
    duty.AttesterSlot = la.AttesterSlot
// buildProposerMap creates a map from validator index to proposal slots for O(1) lookup.
func buildProposerMap(duties []*core.ProposerDutyResult) map[primitives.ValidatorIndex][]primitives.Slot {
    m := make(map[primitives.ValidatorIndex][]primitives.Slot)
    for _, d := range duties {
        m[d.ValidatorIndex] = append(m[d.ValidatorIndex], d.Slot)
    }
    return m
}
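
// Example (added for illustration): given proposer duties for validator 7 at
// slots 64 and 70, buildProposerMap yields m[7] == [64, 70]; a validator with
// no proposals has no entry, so a lookup falls back to a nil slice.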

@@ -12,6 +12,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
    mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
    mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
@@ -53,6 +54,7 @@ func TestGetDutiesV2_OK(t *testing.T) {
        ForkchoiceFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    // Test the first validator in registry.
@@ -140,6 +142,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) {
        Eth1InfoFetcher: &mockExecution.Chain{},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    // Test the first validator in registry.
@@ -247,6 +250,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) {
        Eth1InfoFetcher: &mockExecution.Chain{},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    // Test the first validator in registry.
@@ -341,6 +345,7 @@ func TestGetAltairDutiesV2_UnknownPubkey(t *testing.T) {
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        DepositFetcher: depositCache,
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -387,6 +392,7 @@ func TestGetDutiesV2_StateAdvancement(t *testing.T) {
        TimeFetcher: chain,
        ForkchoiceFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        CoreService: &core.Service{},
    }

    // Verify state processing occurs
@@ -442,6 +448,7 @@ func TestGetDutiesV2_CurrentEpoch_ShouldNotFail(t *testing.T) {
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    // Test the first validator in registry.
@@ -482,6 +489,7 @@ func TestGetDutiesV2_MultipleKeys_OK(t *testing.T) {
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        CoreService: &core.Service{},
    }

    pubkey0 := deposits[0].Data.PublicKey
@@ -540,6 +548,7 @@ func TestGetDutiesV2_NextSyncCommitteePeriod(t *testing.T) {
        TimeFetcher: chain,
        ForkchoiceFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        CoreService: &core.Service{},
    }

    res, err := vs.GetDutiesV2(t.Context(), req)
@@ -559,35 +568,3 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
    _, err := vs.GetDutiesV2(t.Context(), &ethpb.DutiesRequest{})
    assert.ErrorContains(t, "Syncing to latest head", err)
}

func TestGetValidatorAssignment(t *testing.T) {
    start := primitives.Slot(100)

    // Test using CommitteeAssignments
    committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
        5: {
            Committee: []primitives.ValidatorIndex{4, 5, 6},
            AttesterSlot: start + 1,
            CommitteeIndex: primitives.CommitteeIndex(0),
        },
    }

    meta := &metadata{
        committeeAssignments: committeeAssignments,
    }

    vs := &Server{}

    // Test existing validator
    assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(5))
    require.NotNil(t, assignment)
    assert.Equal(t, start+1, assignment.AttesterSlot)
    assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
    assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)

    // Test non-existent validator should return empty assignment
    assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
    require.NotNil(t, assignment)
    assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
    assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

@@ -635,7 +635,11 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
    beaconState, err := vs.StateGen.StateByRoot(ctx, block.Block().ParentRoot())
    roblock, err := blocks.NewROBlockWithRoot(block, [32]byte{}) // root is not used
    if err != nil {
        return nil, errors.Wrap(err, "could not create ROBlock")
    }
    beaconState, err := vs.BlockReceiver.GetPrestateToPropose(ctx, roblock)
    if err != nil {
        return nil, errors.Wrap(err, "could not retrieve beacon state")
    }
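    // Note (added): the prestate now comes from the block receiver rather than
    // from StateGen directly; judging by the name, GetPrestateToPropose returns
    // the parent state suitable for processing the block being proposed.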

@@ -273,6 +273,13 @@ var errNoTerminalBlockHash = errors.New("no terminal block hash")
//
// Otherwise, the terminal block hash is fetched based on the slot's time, and an error is returned if it doesn't exist.
func (vs *Server) getParentBlockHash(ctx context.Context, st state.BeaconState, slot primitives.Slot) ([]byte, error) {
    if st.Version() >= version.Gloas {
        latestBlockHash, err := st.LatestBlockHash()
        if err != nil {
            return nil, errors.Wrap(err, "could not get latest block hash")
        }
        return latestBlockHash[:], nil
    }
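    // Note (added): the fork checks run newest-first; version numbers increase
    // with each fork, so a Gloas state must be intercepted before the Capella
    // branch below would otherwise match it.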
    if st.Version() >= version.Capella {
        return getParentBlockHashPostCapella(st)
    }

@@ -176,6 +176,20 @@ func TestServer_getExecutionPayload(t *testing.T) {
        }
    }

func TestServer_getParentBlockHash_Gloas(t *testing.T) {
    want := bytesutil.ToBytes32([]byte("gloas-parent-hash"))
    st, err := util.NewBeaconStateGloas(func(state *ethpb.BeaconStateGloas) error {
        state.LatestBlockHash = want[:]
        return nil
    })
    require.NoError(t, err)

    vs := &Server{}
    got, err := vs.getParentBlockHash(context.Background(), st, 0)
    require.NoError(t, err)
    require.DeepEqual(t, want[:], got)
}

func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
    beaconDB := dbTest.SetupDB(t)
    nonTransitionSt, _ := util.DeterministicGenesisStateBellatrix(t, 1)

@@ -190,7 +190,13 @@ func (vs *Server) PublishExecutionPayloadEnvelope(
        return nil, status.Errorf(codes.Internal, "failed to broadcast execution payload envelope: %v", err)
    }

    // TODO: Receive the envelope locally following the broadcastReceiveBlock pattern.
    roSigned, err := consensusblocks.WrappedROSignedExecutionPayloadEnvelope(req)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "could not wrap signed envelope: %v", err)
    }
    if err := vs.ExecutionPayloadEnvelopeReceiver.ReceiveExecutionPayloadEnvelope(ctx, roSigned); err != nil {
        return nil, status.Errorf(codes.Internal, "failed to receive execution payload envelope: %v", err)
    }

    // TODO: Build and broadcast data column sidecars from the cached blobs bundle.
    // In Gloas, blob data is delivered alongside the execution payload envelope

@@ -1,6 +1,7 @@
package validator

import (
    "context"
    "math/big"
    "testing"

@@ -8,6 +9,7 @@ import (
    mockstategen "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen/mock"
    "github.com/OffchainLabs/prysm/v7/config/params"
    consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -235,16 +237,30 @@ func TestPublishExecutionPayloadEnvelope_Success(t *testing.T) {
    params.OverrideBeaconConfig(cfg)

    broadcaster := &mockp2p.MockBroadcaster{}
    receiver := &mockExecutionPayloadEnvelopeReceiver{}
    vs := &Server{
        P2P: broadcaster,
        P2P: broadcaster,
        ExecutionPayloadEnvelopeReceiver: receiver,
    }

    req := &ethpb.SignedExecutionPayloadEnvelope{
        Message: &ethpb.ExecutionPayloadEnvelope{
            Slot: 1,
            BuilderIndex: 0,
            BeaconBlockRoot: make([]byte, 32),
            StateRoot: make([]byte, 32),
            Payload: &enginev1.ExecutionPayloadDeneb{
                ParentHash: make([]byte, 32),
                FeeRecipient: make([]byte, 20),
                StateRoot: make([]byte, 32),
                ReceiptsRoot: make([]byte, 32),
                LogsBloom: make([]byte, 256),
                PrevRandao: make([]byte, 32),
                BaseFeePerGas: make([]byte, 32),
                BlockHash: make([]byte, 32),
                ExtraData: make([]byte, 0),
            },
            ExecutionRequests: &enginev1.ExecutionRequests{},
            Slot: 1,
            BuilderIndex: 0,
            BeaconBlockRoot: make([]byte, 32),
            StateRoot: make([]byte, 32),
        },
        Signature: make([]byte, 96),
    }
@@ -254,4 +270,14 @@ func TestPublishExecutionPayloadEnvelope_Success(t *testing.T) {
    require.NotNil(t, resp)
    require.Equal(t, true, broadcaster.BroadcastCalled.Load())
    require.Equal(t, 1, len(broadcaster.BroadcastMessages))
    require.Equal(t, 1, receiver.calls)
}

type mockExecutionPayloadEnvelopeReceiver struct {
    calls int
}

func (m *mockExecutionPayloadEnvelopeReceiver) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
    m.calls++
    return nil
}
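
// Note (added): the mock simply counts invocations, letting the test assert
// that exactly one local receive happens per published envelope alongside the
// broadcast.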

@@ -94,6 +94,14 @@ func TestServer_GetBeaconBlock_Phase0(t *testing.T) {
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

    proposerServer := getProposerServer(ctx, db, beaconState, parentRoot[:])
    // Use a separate mock for BlockReceiver with an independent state copy.
    // This mirrors production where computeStateRoot calls StateByRoot (fresh from DB),
    // not the same head state object mutated by the getSlashings goroutine.
    proposerServer.BlockReceiver = &mock.ChainService{
        State: beaconState.Copy(),
        Root: parentRoot[:],
        ForkChoiceStore: doublylinkedtree.New(),
    }

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)
@@ -405,6 +413,14 @@ func TestServer_GetBeaconBlock_Capella(t *testing.T) {
    }

    proposerServer := getProposerServer(ctx, db, beaconState, parentRoot[:])
    advancedState := beaconState.Copy()
    advancedState, err = transition.ProcessSlots(ctx, advancedState, capellaSlot)
    require.NoError(t, err)
    proposerServer.BlockReceiver = &mock.ChainService{
        State: advancedState,
        Root: parentRoot[:],
        ForkChoiceStore: doublylinkedtree.New(),
    }
    ed, err := blocks.NewWrappedExecutionData(payload)
    require.NoError(t, err)
    proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
@@ -528,6 +544,14 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) {
    blobs := [][]byte{[]byte("blob"), []byte("blob1"), []byte("blob2")}
    bundle := &enginev1.BlobsBundle{KzgCommitments: kc, Proofs: proofs, Blobs: blobs}
    proposerServer := getProposerServer(ctx, db, beaconState, parentRoot[:])
    advancedState := beaconState.Copy()
    advancedState, err = transition.ProcessSlots(ctx, advancedState, denebSlot)
    require.NoError(t, err)
    proposerServer.BlockReceiver = &mock.ChainService{
        State: advancedState,
        Root: parentRoot[:],
        ForkChoiceStore: doublylinkedtree.New(),
    }
    proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
        PayloadIDBytes: &enginev1.PayloadIDBytes{1},
        GetPayloadResponse: &blocks.GetPayloadResponse{
@@ -661,6 +685,14 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
        BlockHash: make([]byte, fieldparams.RootLength),
    }
    proposerServer := getProposerServer(ctx, db, beaconState, parentRoot[:])
    advancedState := beaconState.Copy()
    advancedState, err = transition.ProcessSlots(ctx, advancedState, electraSlot)
    require.NoError(t, err)
    proposerServer.BlockReceiver = &mock.ChainService{
        State: advancedState,
        Root: parentRoot[:],
        ForkChoiceStore: doublylinkedtree.New(),
    }
    ed, err := blocks.NewWrappedExecutionData(payload)
    require.NoError(t, err)
    proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
@@ -790,6 +822,14 @@ func TestServer_GetBeaconBlock_Fulu(t *testing.T) {
        BlockHash: make([]byte, fieldparams.RootLength),
    }
    proposerServer := getProposerServer(ctx, db, beaconState, parentRoot[:])
    advancedState := beaconState.Copy()
    advancedState, err = transition.ProcessSlots(ctx, advancedState, fuluSlot)
    require.NoError(t, err)
    proposerServer.BlockReceiver = &mock.ChainService{
        State: advancedState,
        Root: parentRoot[:],
        ForkChoiceStore: doublylinkedtree.New(),
    }
    ed, err := blocks.NewWrappedExecutionData(payload)
    require.NoError(t, err)
    proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
@@ -1291,6 +1331,11 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
        Eth1InfoFetcher: &mockExecution.Chain{},
        Eth1BlockFetcher: &mockExecution.Chain{},
        StateGen: stategen.New(db, doublylinkedtree.New()),
        BlockReceiver: &mock.ChainService{
            State: beaconState.Copy(),
            Root: parentRoot[:],
            ForkChoiceStore: doublylinkedtree.New(),
        },
    }
    req := util.NewBeaconBlock()
    req.Block.ProposerIndex = 84
@@ -1344,7 +1389,8 @@ func TestHandleStateRootError_IncrementsAttempts(t *testing.T) {

    stateGen := stategen.New(db, doublylinkedtree.New())
    vs := &Server{
        StateGen: stateGen,
        StateGen: stateGen,
        BlockReceiver: &mock.ChainService{State: beaconState},
    }

    // Create a block that will trigger retries