Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-16 16:58:06 -05:00)

Compare commits: 1 commit (9de4eec82a)
@@ -2,10 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "bid.go",
        "pending_payment.go",
    ],
    srcs = ["bid.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
    visibility = ["//visibility:public"],
    deps = [

@@ -15,7 +12,6 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -26,15 +22,10 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = [
        "bid_test.go",
        "pending_payment_test.go",
    ],
    srcs = ["bid_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
@@ -1,76 +0,0 @@
package gloas

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
//     quorum = get_builder_payment_quorum_threshold(state)
//     for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
//         if payment.weight >= quorum:
//             state.builder_pending_withdrawals.append(payment.withdrawal)
//
//     old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
//     new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
//     state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
    quorum, err := builderQuorumThreshold(state)
    if err != nil {
        return errors.Wrap(err, "could not compute builder payment quorum threshold")
    }

    payments, err := state.BuilderPendingPayments()
    if err != nil {
        return errors.Wrap(err, "could not get builder pending payments")
    }

    slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
    var withdrawals []*ethpb.BuilderPendingWithdrawal
    for _, payment := range payments[:slotsPerEpoch] {
        if quorum > payment.Weight {
            continue
        }
        withdrawals = append(withdrawals, payment.Withdrawal)
    }

    if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
        return errors.Wrap(err, "could not append builder pending withdrawals")
    }

    if err := state.RotateBuilderPendingPayments(); err != nil {
        return errors.Wrap(err, "could not rotate builder pending payments")
    }

    return nil
}

// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
//     per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
//     quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
//     return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
    activeBalance, err := helpers.TotalActiveBalance(state)
    if err != nil {
        return 0, errors.Wrap(err, "could not get total active balance")
    }

    cfg := params.BeaconConfig()
    slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
    numerator := cfg.BuilderPaymentThresholdNumerator
    denominator := cfg.BuilderPaymentThresholdDenominator

    activeBalancePerSlot := activeBalance / slotsPerEpoch
    quorum := (activeBalancePerSlot * numerator) / denominator
    return primitives.Gwei(quorum), nil
}
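For orientation, here is a standalone sketch (not part of the diff) of how the quorum arithmetic above plays out with concrete numbers. The 6/10 threshold ratio and the 32-validator set are illustrative assumptions, not the actual BuilderPaymentThreshold configuration values.

```go
// Illustrative only: mirrors builderQuorumThreshold's arithmetic with assumed
// inputs (32 validators at 32 ETH effective balance, an assumed 6/10 ratio).
package main

import "fmt"

func main() {
    const gweiPerETH = 1_000_000_000
    totalActiveBalance := uint64(32) * 32 * gweiPerETH // total active balance in Gwei
    slotsPerEpoch := uint64(32)
    numerator, denominator := uint64(6), uint64(10) // assumed threshold ratio

    perSlotBalance := totalActiveBalance / slotsPerEpoch
    quorum := perSlotBalance * numerator / denominator
    fmt.Println(quorum) // 19200000000 Gwei, i.e. 19.2 ETH of payment weight per slot
}
```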
@@ -1,119 +0,0 @@
package gloas

import (
    "slices"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderQuorumThreshold(t *testing.T) {
    helpers.ClearCache()
    cfg := params.BeaconConfig()

    validators := []*ethpb.Validator{
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
    }
    st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
    require.NoError(t, err)

    got, err := builderQuorumThreshold(st)
    require.NoError(t, err)

    total := uint64(len(validators)) * cfg.MaxEffectiveBalance
    perSlot := total / uint64(cfg.SlotsPerEpoch)
    want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
    require.Equal(t, primitives.Gwei(want), got)
}

func TestProcessBuilderPendingPayments(t *testing.T) {
    helpers.ClearCache()
    cfg := params.BeaconConfig()

    buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
        p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
        for i := range p {
            p[i] = &ethpb.BuilderPendingPayment{
                Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
            }
        }
        for i, w := range weights {
            p[i].Weight = w
            p[i].Withdrawal.Amount = 1
        }
        return p
    }

    validators := []*ethpb.Validator{
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
    }
    pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
    require.NoError(t, err)

    total := uint64(len(validators)) * cfg.MaxEffectiveBalance
    perSlot := total / uint64(cfg.SlotsPerEpoch)
    quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
    slotsPerEpoch := int(cfg.SlotsPerEpoch)

    t.Run("append qualifying withdrawals", func(t *testing.T) {
        payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
        st := &testProcessState{BeaconState: pbSt, payments: payments}

        require.NoError(t, ProcessBuilderPendingPayments(st))
        require.Equal(t, 2, len(st.withdrawals))
        require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
        require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])

        require.Equal(t, 2*slotsPerEpoch, len(st.payments))
        for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
            require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
            require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
            require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
        }
    })

    t.Run("no withdrawals when below quorum", func(t *testing.T) {
        payments := buildPayments(primitives.Gwei(quorum - 1))
        st := &testProcessState{BeaconState: pbSt, payments: payments}

        require.NoError(t, ProcessBuilderPendingPayments(st))
        require.Equal(t, 0, len(st.withdrawals))
    })
}

type testProcessState struct {
    state.BeaconState
    payments    []*ethpb.BuilderPendingPayment
    withdrawals []*ethpb.BuilderPendingWithdrawal
}

func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
    return t.payments, nil
}

func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
    t.withdrawals = append(t.withdrawals, withdrawals...)
    return nil
}

func (t *testProcessState) RotateBuilderPendingPayments() error {
    slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
    rotated := slices.Clone(t.payments[slotsPerEpoch:])
    for range slotsPerEpoch {
        rotated = append(rotated, &ethpb.BuilderPendingPayment{
            Withdrawal: &ethpb.BuilderPendingWithdrawal{
                FeeRecipient: make([]byte, 20),
            },
        })
    }
    t.payments = rotated
    return nil
}
@@ -1053,10 +1053,6 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
        return nil, err
    }

    if uint64(slot) < s.getOffset() {
        return nil, ErrSlotBeforeOffset
    }

    st, err := s.stateByDiff(ctx, slot)
    if err != nil {
        return nil, err

@@ -1074,10 +1070,6 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
        return false, err
    }

    if uint64(slot) < s.getOffset() {
        return false, ErrSlotBeforeOffset
    }

    stateLvl := computeLevel(s.getOffset(), slot)
    return stateLvl != -1, nil
}

@@ -24,7 +24,7 @@ const (
*/

// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
// If it is, it also returns the offset and level in the tree.
// It it is, it also returns the offset and level in the tree.
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
    offset := s.getOffset()
    if uint64(slot) < offset {

@@ -25,7 +25,7 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
            return bbolt.ErrBucketNotFound
        }

        offsetBytes := bucket.Get(offsetKey)
        offsetBytes := bucket.Get([]byte("offset"))
        if offsetBytes == nil {
            return errors.New("state diff cache: offset not found")
        }

@@ -19,7 +19,7 @@ import (

var (
    offsetKey           = []byte("offset")
    ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
    ErrSlotBeforeOffset = errors.New("slot is before root offset")
)

func makeKeyForStateDiffTree(level int, slot uint64) []byte {

@@ -73,9 +73,6 @@ func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (an

// computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
func computeLevel(offset uint64, slot primitives.Slot) int {
    if uint64(slot) < offset {
        return -1
    }
    rel := uint64(slot) - offset
    for i, exp := range flags.Get().StateDiffExponents {
        if exp < 2 || exp >= 64 {

@@ -43,12 +43,8 @@ func TestStateDiff_ComputeLevel(t *testing.T) {

    offset := db.getOffset()

    // should be -1. slot < offset
    lvl := computeLevel(10, primitives.Slot(9))
    require.Equal(t, -1, lvl)

    // 2 ** 21
    lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
    lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
    require.Equal(t, 0, lvl)

    // 2 ** 21 * 3

@@ -1395,23 +1395,6 @@ func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
        require.IsNil(t, readSt)
    })

    t.Run("slot before offset", func(t *testing.T) {
        db := setupDB(t)
        setDefaultStateDiffExponents()

        err := setOffsetInDB(db, 10)
        require.NoError(t, err)

        r := bytesutil.ToBytes32([]byte{'A'})
        ss := &ethpb.StateSummary{Slot: 9, Root: r[:]}
        err = db.SaveStateSummary(t.Context(), ss)
        require.NoError(t, err)

        st, err := db.getStateUsingStateDiff(t.Context(), r)
        require.ErrorIs(t, err, ErrSlotBeforeOffset)
        require.IsNil(t, st)
    })

    t.Run("Full state snapshot", func(t *testing.T) {
        t.Run("using state summary", func(t *testing.T) {
            for v := range version.All() {

@@ -1644,21 +1627,4 @@ func TestStore_HasStateUsingStateDiff(t *testing.T) {
        }

    })

    t.Run("slot before offset", func(t *testing.T) {
        db := setupDB(t)
        setDefaultStateDiffExponents()

        err := setOffsetInDB(db, 10)
        require.NoError(t, err)

        r := bytesutil.ToBytes32([]byte{'B'})
        ss := &ethpb.StateSummary{Slot: 0, Root: r[:]}
        err = db.SaveStateSummary(t.Context(), ss)
        require.NoError(t, err)

        hasState, err := db.hasStateUsingStateDiff(t.Context(), r)
        require.ErrorIs(t, err, ErrSlotBeforeOffset)
        require.Equal(t, false, hasState)
    })
}
@@ -170,8 +170,6 @@ func TestGetSpec(t *testing.T) {
    config.SyncMessageDueBPS = 103
    config.BuilderWithdrawalPrefixByte = byte('b')
    config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
    config.BuilderPaymentThresholdNumerator = 104
    config.BuilderPaymentThresholdDenominator = 105

    var dbp [4]byte
    copy(dbp[:], []byte{'0', '0', '0', '1'})

@@ -212,7 +210,7 @@ func TestGetSpec(t *testing.T) {
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
    data, ok := resp.Data.(map[string]any)
    require.Equal(t, true, ok)
    assert.Equal(t, 180, len(data))
    assert.Equal(t, 178, len(data))
    for k, v := range data {
        t.Run(k, func(t *testing.T) {
            switch k {

@@ -590,10 +588,6 @@ func TestGetSpec(t *testing.T) {
                assert.Equal(t, "102", v)
            case "SYNC_MESSAGE_DUE_BPS":
                assert.Equal(t, "103", v)
            case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
                assert.Equal(t, "104", v)
            case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
                assert.Equal(t, "105", v)
            case "BLOB_SCHEDULE":
                blobSchedule, ok := v.([]any)
                assert.Equal(t, true, ok)
@@ -9,8 +9,6 @@ import (
type writeOnlyGloasFields interface {
    SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
    SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
    RotateBuilderPendingPayments() error
    AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
}

type readOnlyGloasFields interface {

@@ -18,5 +16,4 @@ type readOnlyGloasFields interface {
    IsActiveBuilder(primitives.BuilderIndex) (bool, error)
    CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
    LatestBlockHash() ([32]byte, error)
    BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
}
@@ -135,15 +135,3 @@ func (b *BeaconState) builderPendingBalanceToWithdraw(builderIndex primitives.Bu
    }
    return total
}

// BuilderPendingPayments returns a copy of the builder pending payments.
func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
    if b.version < version.Gloas {
        return nil, errNotSupported("BuilderPendingPayments", b.version)
    }

    b.lock.RLock()
    defer b.lock.RUnlock()

    return b.builderPendingPaymentsVal(), nil
}

@@ -157,12 +157,3 @@ func TestBuilderHelpers(t *testing.T) {
        require.Equal(t, false, ok)
    })
}

func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
    stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
    require.NoError(t, err)
    st := stIface.(*state_native.BeaconState)

    _, err = st.BuilderPendingPayments()
    require.ErrorContains(t, "BuilderPendingPayments", err)
}
@@ -725,13 +725,3 @@ func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
    }
    return pbState, nil
}

// ProtobufBeaconStateGloas transforms an input into beacon state Gloas in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
func ProtobufBeaconStateGloas(s any) (*ethpb.BeaconStateGloas, error) {
    pbState, ok := s.(*ethpb.BeaconStateGloas)
    if !ok {
        return nil, errors.New("input is not type pb.BeaconStateGloas")
    }
    return pbState, nil
}
@@ -4,75 +4,12 @@ import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
)

// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
// front and appending slots per epoch empty payments to the end.
// This implements: state.builder_pending_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
func (b *BeaconState) RotateBuilderPendingPayments() error {
    if b.version < version.Gloas {
        return errNotSupported("RotateBuilderPendingPayments", b.version)
    }

    b.lock.Lock()
    defer b.lock.Unlock()

    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])

    for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
        b.builderPendingPayments[i] = emptyPayment()
    }

    b.markFieldAsDirty(types.BuilderPendingPayments)
    b.rebuildTrie[types.BuilderPendingPayments] = true
    return nil
}

// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
// If the withdrawals slice is shared, it copies the slice first to preserve references.
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
    if b.version < version.Gloas {
        return errNotSupported("AppendBuilderPendingWithdrawals", b.version)
    }

    if len(withdrawals) == 0 {
        return nil
    }

    b.lock.Lock()
    defer b.lock.Unlock()

    pendingWithdrawals := b.builderPendingWithdrawals
    if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
        pendingWithdrawals = make([]*ethpb.BuilderPendingWithdrawal, 0, len(b.builderPendingWithdrawals)+len(withdrawals))
        pendingWithdrawals = append(pendingWithdrawals, b.builderPendingWithdrawals...)
        b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
        b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
    }

    b.builderPendingWithdrawals = append(pendingWithdrawals, withdrawals...)
    b.markFieldAsDirty(types.BuilderPendingWithdrawals)
    return nil
}

func emptyPayment() *ethpb.BuilderPendingPayment {
    return &ethpb.BuilderPendingPayment{
        Weight: 0,
        Withdrawal: &ethpb.BuilderPendingWithdrawal{
            FeeRecipient: make([]byte, 20),
            Amount:       0,
            BuilderIndex: 0,
        },
    }
}

// SetExecutionPayloadBid sets the latest execution payload bid in the state.
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
    if b.version < version.Gloas {
@@ -5,8 +5,6 @@ import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"

@@ -140,110 +138,3 @@ func TestSetBuilderPendingPayment(t *testing.T) {
        require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
    })
}

func TestRotateBuilderPendingPayments(t *testing.T) {
    totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
    payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
    for i := range payments {
        idx := uint64(i)
        payments[i] = &ethpb.BuilderPendingPayment{
            Weight: primitives.Gwei(idx * 100e9),
            Withdrawal: &ethpb.BuilderPendingWithdrawal{
                FeeRecipient: make([]byte, 20),
                Amount:       primitives.Gwei(idx * 1e9),
                BuilderIndex: primitives.BuilderIndex(idx + 100),
            },
        }
    }

    statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
        BuilderPendingPayments: payments,
    })
    require.NoError(t, err)
    st, ok := statePb.(*BeaconState)
    require.Equal(t, true, ok)

    oldPayments, err := st.BuilderPendingPayments()
    require.NoError(t, err)
    require.NoError(t, st.RotateBuilderPendingPayments())

    newPayments, err := st.BuilderPendingPayments()
    require.NoError(t, err)
    slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
    for i := range slotsPerEpoch {
        require.DeepEqual(t, oldPayments[slotsPerEpoch+i], newPayments[i])
    }

    for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
        payment := newPayments[i]
        require.Equal(t, primitives.Gwei(0), payment.Weight)
        require.Equal(t, 20, len(payment.Withdrawal.FeeRecipient))
        require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
        require.Equal(t, primitives.BuilderIndex(0), payment.Withdrawal.BuilderIndex)
    }
}

func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
    st := &BeaconState{version: version.Electra}
    err := st.RotateBuilderPendingPayments()
    require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}

func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
    wd := &ethpb.BuilderPendingWithdrawal{
        FeeRecipient: make([]byte, 20),
        Amount:       1,
        BuilderIndex: 2,
    }
    statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
        BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{wd},
    })
    require.NoError(t, err)

    st, ok := statePb.(*BeaconState)
    require.Equal(t, true, ok)

    copied := st.Copy().(*BeaconState)
    require.Equal(t, uint(2), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())

    appended := &ethpb.BuilderPendingWithdrawal{
        FeeRecipient: make([]byte, 20),
        Amount:       4,
        BuilderIndex: 5,
    }
    require.NoError(t, copied.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{appended}))

    require.Equal(t, 1, len(st.builderPendingWithdrawals))
    require.Equal(t, 2, len(copied.builderPendingWithdrawals))
    require.DeepEqual(t, wd, copied.builderPendingWithdrawals[0])
    require.DeepEqual(t, appended, copied.builderPendingWithdrawals[1])
    require.DeepEqual(t, wd, st.builderPendingWithdrawals[0])
    require.Equal(t, uint(1), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
    require.Equal(t, uint(1), copied.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
}

func TestAppendBuilderPendingWithdrawals(t *testing.T) {
    st := &BeaconState{
        version:     version.Gloas,
        dirtyFields: make(map[types.FieldIndex]bool),
        sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
            types.BuilderPendingWithdrawals: stateutil.NewRef(1),
        },
        builderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
    }

    first := &ethpb.BuilderPendingWithdrawal{Amount: 1}
    second := &ethpb.BuilderPendingWithdrawal{Amount: 2}
    require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{first, second}))

    require.Equal(t, 2, len(st.builderPendingWithdrawals))
    require.DeepEqual(t, first, st.builderPendingWithdrawals[0])
    require.DeepEqual(t, second, st.builderPendingWithdrawals[1])
    require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
}

func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
    st := &BeaconState{version: version.Electra}
    err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
    require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
}
@@ -148,7 +148,7 @@ func (b batch) ensureParent(expected [32]byte) error {
func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
    return &eth.BeaconBlocksByRangeRequest{
        StartSlot: b.begin,
        Count:     uint64(b.end.FlooredSubSlot(b.begin)),
        Count:     uint64(b.end - b.begin),
        Step:      1,
    }
}

@@ -156,7 +156,7 @@ func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
    return &eth.BlobSidecarsByRangeRequest{
        StartSlot: b.begin,
        Count:     uint64(b.end.FlooredSubSlot(b.begin)),
        Count:     uint64(b.end - b.begin),
    }
}
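The hunks above swap the plain `b.end - b.begin` subtraction for `b.end.FlooredSubSlot(b.begin)`. Presumably FlooredSubSlot is a saturating subtraction on the Slot type, so a batch whose end precedes its begin yields a count of 0 instead of a huge wrapped-around value. A minimal sketch of that idea follows; the method body is an assumption, not the actual Prysm implementation.

```go
// Sketch of a saturating ("floored") slot subtraction: clamp at zero rather
// than wrapping around on unsigned underflow. Assumed semantics, not the
// actual Prysm code.
package main

import "fmt"

type Slot uint64

func (s Slot) FlooredSubSlot(other Slot) Slot {
    if other > s {
        return 0
    }
    return s - other
}

func main() {
    fmt.Println(Slot(200).FlooredSubSlot(100)) // 100
    fmt.Println(Slot(100).FlooredSubSlot(200)) // 0, instead of 18446744073709551516
}
```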
@@ -10,93 +10,6 @@ import (
    "github.com/pkg/errors"
)

func TestBlockRequest(t *testing.T) {
    cases := []struct {
        name          string
        begin         primitives.Slot
        end           primitives.Slot
        expectedCount uint64
    }{
        {
            name:          "normal case",
            begin:         100,
            end:           200,
            expectedCount: 100,
        },
        {
            name:          "end equals begin",
            begin:         100,
            end:           100,
            expectedCount: 0,
        },
        {
            name:          "end less than begin (would underflow without check)",
            begin:         200,
            end:           100,
            expectedCount: 0,
        },
        {
            name:          "zero values",
            begin:         0,
            end:           0,
            expectedCount: 0,
        },
        {
            name:          "single slot",
            begin:         0,
            end:           1,
            expectedCount: 1,
        },
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            b := batch{begin: tc.begin, end: tc.end}
            req := b.blockRequest()
            require.Equal(t, tc.expectedCount, req.Count)
            require.Equal(t, tc.begin, req.StartSlot)
            require.Equal(t, uint64(1), req.Step)
        })
    }
}

func TestBlobRequest(t *testing.T) {
    cases := []struct {
        name          string
        begin         primitives.Slot
        end           primitives.Slot
        expectedCount uint64
    }{
        {
            name:          "normal case",
            begin:         100,
            end:           200,
            expectedCount: 100,
        },
        {
            name:          "end equals begin",
            begin:         100,
            end:           100,
            expectedCount: 0,
        },
        {
            name:          "end less than begin (would underflow without check)",
            begin:         200,
            end:           100,
            expectedCount: 0,
        },
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            b := batch{begin: tc.begin, end: tc.end}
            req := b.blobRequest()
            require.Equal(t, tc.expectedCount, req.Count)
            require.Equal(t, tc.begin, req.StartSlot)
        })
    }
}

func TestSortBatchDesc(t *testing.T) {
    orderIn := []primitives.Slot{100, 10000, 1}
    orderOut := []primitives.Slot{10000, 100, 1}
@@ -4,6 +4,9 @@ import (
    "context"
    "time"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/monitoring/tracing"

@@ -53,6 +56,32 @@ func (s *Service) verifierRoutine() {
    }
}

// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
    for {
        kzgBatch := make([]*kzgVerifier, 0, 1)
        select {
        case <-s.ctx.Done():
            return
        case kzg := <-s.kzgChan:
            kzgBatch = append(kzgBatch, kzg)
        }
        for {
            select {
            case <-s.ctx.Done():
                return
            case kzg := <-s.kzgChan:
                kzgBatch = append(kzgBatch, kzg)
                continue
            default:
                verifyKzgBatch(kzgBatch)
            }
            break
        }
    }
}

func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
    _, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
    defer span.End()

@@ -125,3 +154,71 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
    }
    return aggSet, nil
}

func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
    _, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
    defer span.End()

    timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second

    resChan := make(chan error, 1)
    verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()

    select {
    case s.kzgChan <- verificationSet:
    case <-ctx.Done():
        return pubsub.ValidationIgnore, ctx.Err()
    }

    select {
    case <-ctx.Done():
        return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
    case err := <-resChan:
        if err != nil {
            log.WithError(err).Trace("Could not perform batch verification")
            tracing.AnnotateError(span, err)
            return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
        }
    }
    return pubsub.ValidationAccept, nil
}

func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
    _, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
    defer span.End()
    start := time.Now()
    if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
        err = errors.Wrap(err, "could not verify")
        tracing.AnnotateError(span, err)
        return pubsub.ValidationReject, err
    }
    verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
    return pubsub.ValidationAccept, nil
}

func verifyKzgBatch(kzgBatch []*kzgVerifier) {
    if len(kzgBatch) == 0 {
        return
    }

    allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
    for _, kzgVerifier := range kzgBatch {
        allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
    }

    var verificationErr error
    start := time.Now()
    err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
    if err != nil {
        verificationErr = errors.Wrap(err, "batch KZG verification failed")
    } else {
        verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
    }

    // Send the same result to all verifiers in the batch
    for _, verifier := range kzgBatch {
        verifier.resChan <- verificationErr
    }
}
@@ -668,7 +668,7 @@ func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p

func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
    missStr := make([]string, 0, len(missing))
    for _, k := range missing {
    for k := range missing {
        missStr = append(missStr, fmt.Sprintf("%#x", k))
    }
    return errors.Wrapf(errMissingBlobsForBlockCommitments,
@@ -226,6 +226,8 @@ func (s *Service) Start() {

// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
    const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars

    blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
    if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
        return nil

@@ -258,7 +260,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
    blockVersion := roBlock.Version()

    if blockVersion >= version.Fulu {
        if err := s.fetchOriginDataColumnSidecars(roBlock); err != nil {
        if err := s.fetchOriginDataColumnSidecars(roBlock, delay); err != nil {
            return errors.Wrap(err, "fetch origin columns")
        }
        return nil

@@ -412,7 +414,7 @@ func (s *Service) fetchOriginBlobSidecars(pids []peer.ID, rob blocks.ROBlock) er
    return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}

func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay time.Duration) error {
    const (
        errorMessage     = "Failed to fetch origin data column sidecars"
        warningIteration = 10

@@ -499,6 +501,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
        log := log.WithFields(logrus.Fields{
            "attempt":        attempt,
            "missingIndices": helpers.SortedPrettySliceFromMap(missingIndicesByRoot[root]),
            "delay":          delay,
        })

        logFunc := log.Debug

@@ -507,6 +510,13 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
        }

        logFunc("Failed to fetch some origin data column sidecars, retrying later")

        // Wait before retrying, respecting context cancellation.
        select {
        case <-s.ctx.Done():
            return s.ctx.Err()
        case <-time.After(delay):
        }
    }
}

@@ -687,7 +687,10 @@ func TestFetchOriginColumns(t *testing.T) {
    cfg.BlobSchedule = []params.BlobScheduleEntry{{Epoch: 0, MaxBlobsPerBlock: 10}}
    params.OverrideBeaconConfig(cfg)

    const blobCount = 1
    const (
        delay     = 0
        blobCount = 1
    )

    t.Run("block has no commitments", func(t *testing.T) {
        service := new(Service)

@@ -699,7 +702,7 @@ func TestFetchOriginColumns(t *testing.T) {
        roBlock, err := blocks.NewROBlock(signedBlock)
        require.NoError(t, err)

        err = service.fetchOriginDataColumnSidecars(roBlock)
        err = service.fetchOriginDataColumnSidecars(roBlock, delay)
        require.NoError(t, err)
    })

@@ -721,7 +724,7 @@ func TestFetchOriginColumns(t *testing.T) {
        err := storage.Save(verifiedSidecars)
        require.NoError(t, err)

        err = service.fetchOriginDataColumnSidecars(roBlock)
        err = service.fetchOriginDataColumnSidecars(roBlock, delay)
        require.NoError(t, err)
    })

@@ -826,7 +829,7 @@ func TestFetchOriginColumns(t *testing.T) {
        attempt++
    })

    err = service.fetchOriginDataColumnSidecars(roBlock)
    err = service.fetchOriginDataColumnSidecars(roBlock, delay)
    require.NoError(t, err)

    // Check all corresponding sidecars are saved in the store.
@@ -1,14 +1,339 @@
package sync

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/util"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func TestValidateWithKzgBatchVerifier(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    tests := []struct {
        name           string
        dataColumns    []blocks.RODataColumn
        expectedResult pubsub.ValidationResult
        expectError    bool
    }{
        {
            name:           "single valid data column",
            dataColumns:    createValidTestDataColumns(t, 1),
            expectedResult: pubsub.ValidationAccept,
            expectError:    false,
        },
        {
            name:           "multiple valid data columns",
            dataColumns:    createValidTestDataColumns(t, 3),
            expectedResult: pubsub.ValidationAccept,
            expectError:    false,
        },
        {
            name:           "single invalid data column",
            dataColumns:    createInvalidTestDataColumns(t, 1),
            expectedResult: pubsub.ValidationReject,
            expectError:    true,
        },
        {
            name:           "empty data column slice",
            dataColumns:    []blocks.RODataColumn{},
            expectedResult: pubsub.ValidationAccept,
            expectError:    false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            ctx := t.Context()

            service := &Service{
                ctx:     ctx,
                kzgChan: make(chan *kzgVerifier, 100),
            }
            go service.kzgVerifierRoutine()

            result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)

            require.Equal(t, tt.expectedResult, result)
            if tt.expectError {
                assert.NotNil(t, err)
            } else {
                assert.NoError(t, err)
            }
        })
    }
}

func TestVerifierRoutine(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    t.Run("processes single request", func(t *testing.T) {
        ctx := t.Context()

        service := &Service{
            ctx:     ctx,
            kzgChan: make(chan *kzgVerifier, 100),
        }
        go service.kzgVerifierRoutine()

        dataColumns := createValidTestDataColumns(t, 1)
        resChan := make(chan error, 1)
        service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}

        select {
        case err := <-resChan:
            require.NoError(t, err)
        case <-time.After(time.Second):
            t.Fatal("timeout waiting for verification result")
        }
    })

    t.Run("batches multiple requests", func(t *testing.T) {
        ctx := t.Context()

        service := &Service{
            ctx:     ctx,
            kzgChan: make(chan *kzgVerifier, 100),
        }
        go service.kzgVerifierRoutine()

        const numRequests = 5
        resChans := make([]chan error, numRequests)

        for i := range numRequests {
            dataColumns := createValidTestDataColumns(t, 1)
            resChan := make(chan error, 1)
            resChans[i] = resChan
            service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
        }

        for i := range numRequests {
            select {
            case err := <-resChans[i]:
                require.NoError(t, err)
            case <-time.After(time.Second):
                t.Fatalf("timeout waiting for verification result %d", i)
            }
        }
    })

    t.Run("context cancellation stops routine", func(t *testing.T) {
        ctx, cancel := context.WithCancel(context.Background())

        service := &Service{
            ctx:     ctx,
            kzgChan: make(chan *kzgVerifier, 100),
        }

        routineDone := make(chan struct{})
        go func() {
            service.kzgVerifierRoutine()
            close(routineDone)
        }()

        cancel()

        select {
        case <-routineDone:
        case <-time.After(time.Second):
            t.Fatal("timeout waiting for routine to exit")
        }
    })
}

func TestVerifyKzgBatch(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    t.Run("all valid data columns succeed", func(t *testing.T) {
        dataColumns := createValidTestDataColumns(t, 3)
        resChan := make(chan error, 1)
        kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}

        verifyKzgBatch(kzgVerifiers)

        select {
        case err := <-resChan:
            require.NoError(t, err)
        case <-time.After(time.Second):
            t.Fatal("timeout waiting for batch verification")
        }
    })

    t.Run("invalid proofs fail entire batch", func(t *testing.T) {
        validColumns := createValidTestDataColumns(t, 1)
        invalidColumns := createInvalidTestDataColumns(t, 1)
        allColumns := append(validColumns, invalidColumns...)

        resChan := make(chan error, 1)
        kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}

        verifyKzgBatch(kzgVerifiers)

        select {
        case err := <-resChan:
            assert.NotNil(t, err)
        case <-time.After(time.Second):
            t.Fatal("timeout waiting for batch verification")
        }
    })

    t.Run("empty batch handling", func(t *testing.T) {
        verifyKzgBatch([]*kzgVerifier{})
    })
}

func TestKzgBatchVerifierConcurrency(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    ctx := t.Context()

    service := &Service{
        ctx:     ctx,
        kzgChan: make(chan *kzgVerifier, 100),
    }
    go service.kzgVerifierRoutine()

    const numGoroutines = 10
    const numRequestsPerGoroutine = 5

    var wg sync.WaitGroup
    wg.Add(numGoroutines)

    // Multiple goroutines sending verification requests simultaneously
    for i := range numGoroutines {
        go func(goroutineID int) {
            defer wg.Done()

            for range numRequestsPerGoroutine {
                dataColumns := createValidTestDataColumns(t, 1)
                result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
                require.Equal(t, pubsub.ValidationAccept, result)
                require.NoError(t, err)
            }
        }(i)
    }

    wg.Wait()
}

func TestKzgBatchVerifierFallback(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
        ctx := t.Context()

        service := &Service{
            ctx:     ctx,
            kzgChan: make(chan *kzgVerifier, 100),
        }
        go service.kzgVerifierRoutine()

        validColumns := createValidTestDataColumns(t, 1)
        invalidColumns := createInvalidTestDataColumns(t, 1)

        result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
        require.Equal(t, pubsub.ValidationAccept, result)
        require.NoError(t, err)

        result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
        require.Equal(t, pubsub.ValidationReject, result)
        assert.NotNil(t, err)
    })

    t.Run("empty data columns fallback", func(t *testing.T) {
        ctx := t.Context()

        service := &Service{
            ctx:     ctx,
            kzgChan: make(chan *kzgVerifier, 100),
        }
        go service.kzgVerifierRoutine()

        result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
        require.Equal(t, pubsub.ValidationAccept, result)
        require.NoError(t, err)
    })
}

func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.SecondsPerSlot = 0
    params.OverrideBeaconConfig(cfg)

    ctx, cancel := context.WithCancel(t.Context())
    defer cancel()

    service := &Service{
        ctx:     ctx,
        kzgChan: make(chan *kzgVerifier),
    }
    go service.kzgVerifierRoutine()

    result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
    require.Equal(t, pubsub.ValidationIgnore, result)
    require.ErrorIs(t, err, context.DeadlineExceeded)

    done := make(chan struct{})
    go func() {
        _, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
        close(done)
    }()

    select {
    case <-done:
    case <-time.After(500 * time.Millisecond):
        t.Fatal("validateWithKzgBatchVerifier blocked")
    }
}

func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
    cancelledCtx, cancel := context.WithCancel(t.Context())
    cancel()

    service := &Service{
        ctx:     context.Background(),
        kzgChan: make(chan *kzgVerifier),
    }

    done := make(chan struct{})
    go func() {
        result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
        require.Equal(t, pubsub.ValidationIgnore, result)
        require.ErrorIs(t, err, context.Canceled)
        close(done)
    }()

    select {
    case <-done:
    case <-time.After(500 * time.Millisecond):
        t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
    }

    select {
    case <-service.kzgChan:
        t.Fatal("verificationSet was sent to kzgChan despite canceled context")
    default:
    }
}

func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
    _, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
    if len(roSidecars) >= count {
@@ -77,13 +77,8 @@ func SendBeaconBlocksByRangeRequest(
    }
    defer closeStream(stream, log)

    // Cap the slice capacity to MaxRequestBlock to prevent panic from invalid Count values.
    // This guards against upstream bugs that may produce astronomically large Count values
    // (e.g., due to unsigned integer underflow).
    sliceCap := min(req.Count, params.MaxRequestBlock(slots.ToEpoch(tor.CurrentSlot())))

    // Augment block processing function, if non-nil block processor is provided.
    blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, sliceCap)
    blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, req.Count)
    process := func(blk interfaces.ReadOnlySignedBeaconBlock) error {
        blocks = append(blocks, blk)
        if blockProcessor != nil {
@@ -168,6 +168,7 @@ type Service struct {
    syncContributionBitsOverlapLock  sync.RWMutex
    syncContributionBitsOverlapCache *lru.Cache
    signatureChan                    chan *signatureVerifier
    kzgChan                          chan *kzgVerifier
    clockWaiter                      startup.ClockWaiter
    initialSyncComplete              chan struct{}
    verifierWaiter                   *verification.InitializerWaiter

@@ -208,7 +209,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
    }
    // Initialize signature channel with configured limit
    r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)

    // Initialize KZG channel with fixed buffer size of 100.
    // This buffer size is designed to handle burst traffic of data column gossip messages:
    // - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
    r.kzgChan = make(chan *kzgVerifier, 100)
    // Correctly remove it from our seen pending block map.
    // The eviction method always assumes that the mutex is held.
    r.slotToPendingBlocks.OnEvicted(func(s string, i any) {

@@ -261,6 +265,7 @@ func (s *Service) Start() {
    s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

    go s.verifierRoutine()
    go s.kzgVerifierRoutine()
    go s.startDiscoveryAndSubscriptions()
    go s.processDataColumnLogs()

@@ -144,9 +144,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    }

    // [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
    if err := verifier.SidecarKzgProofVerified(); err != nil {
        return pubsub.ValidationReject, err
    validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
    if validationResult != pubsub.ValidationAccept {
        return validationResult, err
    }
    // Mark KZG verification as satisfied since we did it via batch verifier
    verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)

    // [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
    // with valid header signature, sidecar inclusion proof, and kzg proof.

@@ -71,7 +71,10 @@ func TestValidateDataColumn(t *testing.T) {
        ctx:                 ctx,
        newColumnsVerifier:  newDataColumnsVerifier,
        seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
        kzgChan:             make(chan *kzgVerifier, 100),
    }
    // Start the KZG verifier routine for batch verification
    go service.kzgVerifierRoutine()

    // Encode a `beaconBlock` message instead of expected.
    buf := new(bytes.Buffer)
@@ -1,3 +0,0 @@
### Ignored

- small touch ups on state diff code.

@@ -1,3 +0,0 @@
### Fixed

- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes.

@@ -1,3 +0,0 @@
### Removed

- Batching of KZG verification for incoming via gossip data column sidecars

@@ -1,3 +0,0 @@
### Removed

- Remove unused `delay` parameter from `fetchOriginDataColumnSidecars` function.

@@ -1,3 +0,0 @@
### Changed

- Added some defensive checks to prevent overflows in block batch requests.

changelog/satushh-delay.md (new file)
@@ -0,0 +1,3 @@
### Changed

- Use delay in fetchOriginDataColumnSidecars. Currently it is simply logged.

@@ -1,3 +0,0 @@
### Changed

- Log commitments instead of indices in missingCommitError

@@ -1,2 +0,0 @@
### Added
- add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append)

@@ -1,3 +0,0 @@
### Added

- Added shell completion support for `beacon-chain` and `validator` CLI tools.
@@ -3,8 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "completion.go",
        "completion_scripts.go",
        "config.go",
        "defaults.go",
        "flags.go",

@@ -30,7 +28,6 @@ go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "completion_test.go",
        "config_test.go",
        "flags_test.go",
        "helpers_test.go",

@@ -271,11 +271,9 @@ func main() {
        Commands: []*cli.Command{
            dbcommands.Commands,
            jwtcommands.Commands,
            cmd.CompletionCommand("beacon-chain"),
        },
        Flags:                appFlags,
        Before:               before,
        EnableBashCompletion: true,
        Flags:  appFlags,
        Before: before,
    }

    defer func() {
@@ -1,63 +0,0 @@
package cmd

import (
    "fmt"

    "github.com/urfave/cli/v2"
)

// CompletionCommand returns the completion command for the given binary name.
// The binaryName parameter should be "beacon-chain" or "validator".
func CompletionCommand(binaryName string) *cli.Command {
    return &cli.Command{
        Name:     "completion",
        Category: "completion",
        Usage:    "Generate shell completion scripts",
        Description: fmt.Sprintf(`Generate shell completion scripts for bash, zsh, or fish.

To load completions:

Bash:
  $ source <(%[1]s completion bash)
  # To load completions for each session, execute once:
  $ %[1]s completion bash > /etc/bash_completion.d/%[1]s

Zsh:
  # To load completions for each session, execute once:
  $ %[1]s completion zsh > "${fpath[1]}/_%[1]s"

  # You may need to start a new shell for completions to take effect.

Fish:
  $ %[1]s completion fish | source
  # To load completions for each session, execute once:
  $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
`, binaryName),
        Subcommands: []*cli.Command{
            {
                Name:  "bash",
                Usage: "Generate bash completion script",
                Action: func(_ *cli.Context) error {
                    fmt.Println(bashCompletionScript(binaryName))
                    return nil
                },
            },
            {
                Name:  "zsh",
                Usage: "Generate zsh completion script",
                Action: func(_ *cli.Context) error {
                    fmt.Println(zshCompletionScript(binaryName))
                    return nil
                },
            },
            {
                Name:  "fish",
                Usage: "Generate fish completion script",
                Action: func(_ *cli.Context) error {
                    fmt.Println(fishCompletionScript(binaryName))
                    return nil
                },
            },
        },
    }
}
@@ -1,99 +0,0 @@
package cmd

import (
	"fmt"
	"strings"
)

// bashCompletionScript returns the bash completion script for the given binary.
func bashCompletionScript(binaryName string) string {
	// Convert hyphens to underscores for bash function names
	funcName := strings.ReplaceAll(binaryName, "-", "_")
	return fmt.Sprintf(`#!/bin/bash

_%[1]s_completions() {
local cur prev words cword opts
COMPREPLY=()

# Use bash-completion if available, otherwise set variables directly
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -n "=:" || return
else
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
words=("${COMP_WORDS[@]}")
cword=$COMP_CWORD
fi

# Build command array for completion - flag must be at the END
local -a requestComp
if [[ "$cur" == "-"* ]]; then
requestComp=("${COMP_WORDS[@]:0:COMP_CWORD}" "$cur" --generate-bash-completion)
else
requestComp=("${COMP_WORDS[@]:0:COMP_CWORD}" --generate-bash-completion)
fi

opts=$("${requestComp[@]}" 2>/dev/null)
COMPREPLY=($(compgen -W "${opts}" -- "${cur}"))
return 0
}

complete -o bashdefault -o default -o nospace -F _%[1]s_completions %[2]s
`, funcName, binaryName)
}

// zshCompletionScript returns the zsh completion script for the given binary.
func zshCompletionScript(binaryName string) string {
	// Convert hyphens to underscores for zsh function names
	funcName := strings.ReplaceAll(binaryName, "-", "_")
	return fmt.Sprintf(`#compdef %[2]s

_%[1]s() {
local curcontext="$curcontext" ret=1
local -a completions

# Build command array with --generate-bash-completion at the END
local -a requestComp
if [[ "${words[CURRENT]}" == -* ]]; then
requestComp=(${words[1,CURRENT]} --generate-bash-completion)
else
requestComp=(${words[1,CURRENT-1]} --generate-bash-completion)
fi

completions=($("${requestComp[@]}" 2>/dev/null))

if [[ ${#completions[@]} -gt 0 ]]; then
_describe -t commands '%[2]s' completions && ret=0
fi

# Fallback to file completion
_files && ret=0

return ret
}

compdef _%[1]s %[2]s
`, funcName, binaryName)
}

// fishCompletionScript returns the fish completion script for the given binary.
func fishCompletionScript(binaryName string) string {
	// Convert hyphens to underscores for fish function names
	funcName := strings.ReplaceAll(binaryName, "-", "_")
	return fmt.Sprintf(`# Fish completion for %[2]s

function __fish_%[1]s_complete
set -l args (commandline -opc)
set -l cur (commandline -ct)

# Build command with --generate-bash-completion at the END
if string match -q -- '-*' "$cur"
%[2]s $args $cur --generate-bash-completion 2>/dev/null
else
%[2]s $args --generate-bash-completion 2>/dev/null
end
end

complete -c %[2]s -f -a "(__fish_%[1]s_complete)"
`, funcName, binaryName)
}
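
Aside, not part of this compare: the main.go hunks above and below show how the completion command is wired in. The subcommand returned by cmd.CompletionCommand is appended to the app's Commands, and EnableBashCompletion is switched on so urfave/cli answers the hidden --generate-bash-completion flag that the generated scripts invoke. Below is a minimal, hypothetical sketch of that wiring; the import path for the cmd package is an assumption for illustration, not taken from the diff.

package main

import (
	"log"
	"os"

	"github.com/OffchainLabs/prysm/v7/cmd" // assumed import path for the cmd package shown in this diff
	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "beacon-chain",
		// EnableBashCompletion exposes the hidden --generate-bash-completion
		// flag that the bash/zsh/fish scripts above call to list suggestions.
		EnableBashCompletion: true,
		Commands: []*cli.Command{
			cmd.CompletionCommand("beacon-chain"),
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Once built, running the binary with "completion bash", "completion zsh", or "completion fish" prints the matching script, which is then loaded as described in the command's Description text, for example by sourcing the bash script into the current shell.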
@@ -1,105 +0,0 @@
package cmd

import (
	"strings"
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/urfave/cli/v2"
)

func TestCompletionCommand(t *testing.T) {
	t.Run("creates command with correct name", func(t *testing.T) {
		cmd := CompletionCommand("beacon-chain")
		require.Equal(t, "completion", cmd.Name)
	})

	t.Run("has three subcommands", func(t *testing.T) {
		cmd := CompletionCommand("beacon-chain")
		require.Equal(t, 3, len(cmd.Subcommands))

		names := make([]string, len(cmd.Subcommands))
		for i, sub := range cmd.Subcommands {
			names[i] = sub.Name
		}
		assert.DeepEqual(t, []string{"bash", "zsh", "fish"}, names)
	})

	t.Run("description contains binary name", func(t *testing.T) {
		cmd := CompletionCommand("validator")
		assert.Equal(t, true, strings.Contains(cmd.Description, "validator"))
	})
}

func TestBashCompletionScript(t *testing.T) {
	script := bashCompletionScript("beacon-chain")

	assert.Equal(t, true, strings.Contains(script, "beacon-chain"), "script should contain binary name")
	assert.Equal(t, true, strings.Contains(script, "_beacon_chain_completions"), "script should contain function name with underscores")
	assert.Equal(t, true, strings.Contains(script, "complete -o bashdefault"), "script should contain complete command")
	assert.Equal(t, true, strings.Contains(script, "--generate-bash-completion"), "script should use generate-bash-completion flag")
}

func TestZshCompletionScript(t *testing.T) {
	script := zshCompletionScript("validator")

	assert.Equal(t, true, strings.Contains(script, "#compdef validator"), "script should contain compdef directive")
	assert.Equal(t, true, strings.Contains(script, "_validator"), "script should contain function name")
	assert.Equal(t, true, strings.Contains(script, "--generate-bash-completion"), "script should use generate-bash-completion flag")
}

func TestFishCompletionScript(t *testing.T) {
	script := fishCompletionScript("beacon-chain")

	assert.Equal(t, true, strings.Contains(script, "complete -c beacon-chain"), "script should contain complete command")
	assert.Equal(t, true, strings.Contains(script, "__fish_beacon_chain_complete"), "script should contain function name with underscores")
	assert.Equal(t, true, strings.Contains(script, "--generate-bash-completion"), "script should use generate-bash-completion flag")
}

func TestScriptFunctionNames(t *testing.T) {
	// Test that hyphens are converted to underscores in function names
	bashScript := bashCompletionScript("beacon-chain")
	assert.Equal(t, true, strings.Contains(bashScript, "_beacon_chain_completions"))
	assert.Equal(t, false, strings.Contains(bashScript, "_beacon-chain_completions"))

	zshScript := zshCompletionScript("beacon-chain")
	assert.Equal(t, true, strings.Contains(zshScript, "_beacon_chain"))

	fishScript := fishCompletionScript("beacon-chain")
	assert.Equal(t, true, strings.Contains(fishScript, "__fish_beacon_chain_complete"))
}

func TestCompletionSubcommandActions(t *testing.T) {
	// Test that Action functions execute without errors
	cmd := CompletionCommand("beacon-chain")

	tests := []struct {
		name       string
		subcommand string
	}{
		{"bash action executes", "bash"},
		{"zsh action executes", "zsh"},
		{"fish action executes", "fish"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var subCmd *cli.Command
			for _, sub := range cmd.Subcommands {
				if sub.Name == tt.subcommand {
					subCmd = sub
					break
				}
			}
			require.NotNil(t, subCmd, "subcommand should exist")
			require.NotNil(t, subCmd.Action, "subcommand should have an action")

			// Action should not return an error; use a real cli.Context
			app := &cli.App{}
			ctx := cli.NewContext(app, nil, nil)
			err := subCmd.Action(ctx)
			require.NoError(t, err)
		})
	}
}
@@ -140,10 +140,8 @@ func main() {
slashingprotectioncommands.Commands,
dbcommands.Commands,
web.Commands,
cmd.CompletionCommand("validator"),
},
Flags: appFlags,
EnableBashCompletion: true,
Flags: appFlags,
Before: func(ctx *cli.Context) error {
// Load flags from config file, if specified.
if err := cmd.LoadFlagsFromConfig(ctx, appFlags); err != nil {

@@ -293,10 +293,6 @@ type BeaconChainConfig struct {
ValidatorCustodyRequirement uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" spec:"true"` // ValidatorCustodyRequirement is the minimum number of custody groups an honest node with validators attached custodies and serves samples from
BalancePerAdditionalCustodyGroup uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" spec:"true"` // BalancePerAdditionalCustodyGroup is the balance increment corresponding to one additional group to custody.

// Values introduced in Gloas upgrade
BuilderPaymentThresholdNumerator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_NUMERATOR" spec:"true"` // BuilderPaymentThresholdNumerator is the numerator for builder payment quorum threshold calculation.
BuilderPaymentThresholdDenominator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_DENOMINATOR" spec:"true"` // BuilderPaymentThresholdDenominator is the denominator for builder payment quorum threshold calculation.

// Networking Specific Parameters
MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE" spec:"true"` // MAX_PAYLOAD_SIZE is the maximum allowed size of uncompressed payload in gossip messages and rpc chunks.
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT" spec:"true"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.

@@ -331,11 +331,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MinEpochsForDataColumnSidecarsRequest: 4096,
ValidatorCustodyRequirement: 8,
BalancePerAdditionalCustodyGroup: 32_000_000_000,

// Values related to gloas
BuilderPaymentThresholdNumerator: 6,
BuilderPaymentThresholdDenominator: 10,

// Values related to networking parameters.
MaxPayloadSize: 10 * 1 << 20, // 10 MiB
AttestationSubnetCount: 64,

@@ -200,7 +200,6 @@ go_test(
"fulu__sanity__blocks_test.go",
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
"gloas__operations__execution_payload_header_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",
@@ -280,7 +279,6 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
"//testing/spectest/shared/gloas/operations:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",

@@ -1,11 +0,0 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
)

func TestMainnet_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
	epoch_processing.RunBuilderPendingPaymentsTests(t, "mainnet")
}
@@ -206,7 +206,6 @@ go_test(
"fulu__sanity__blocks_test.go",
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
"gloas__operations__execution_payload_bid_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",
@@ -290,7 +289,6 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
"//testing/spectest/shared/gloas/operations:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",

@@ -1,11 +0,0 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
)

func TestMinimal_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
	epoch_processing.RunBuilderPendingPaymentsTests(t, "minimal")
}
@@ -1,26 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "builder_pending_payments.go",
        "helpers.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/gloas:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/spectest/utils:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_google_go_cmp//cmp:go_default_library",
        "@io_bazel_rules_go//go/tools/bazel:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//testing/protocmp:go_default_library",
    ],
)
@@ -1,26 +0,0 @@
package epoch_processing

import (
	"path"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
)

func RunBuilderPendingPaymentsTests(t *testing.T, config string) {
	require.NoError(t, utils.SetConfig(t, config))
	testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "epoch_processing/builder_pending_payments/pyspec_tests")
	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			folderPath := path.Join(testsFolderPath, folder.Name())
			RunEpochOperationTest(t, folderPath, processBuilderPendingPayments)
		})
	}
}

func processBuilderPendingPayments(t *testing.T, st state.BeaconState) (state.BeaconState, error) {
	return st, gloas.ProcessBuilderPendingPayments(st)
}
@@ -1,76 +0,0 @@
package epoch_processing

import (
	"os"
	"path"
	"strings"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	"github.com/bazelbuild/rules_go/go/tools/bazel"
	"github.com/golang/snappy"
	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/testing/protocmp"
)

type epochOperation func(*testing.T, state.BeaconState) (state.BeaconState, error)

// RunEpochOperationTest takes in the prestate and processes it through the
// passed in epoch operation function and checks the post state with the expected post state.
func RunEpochOperationTest(
	t *testing.T,
	testFolderPath string,
	operationFn epochOperation,
) {
	preBeaconStateFile, err := util.BazelFileBytes(path.Join(testFolderPath, "pre.ssz_snappy"))
	require.NoError(t, err)
	preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
	require.NoError(t, err, "Failed to decompress")
	preBeaconStateBase := &ethpb.BeaconStateGloas{}
	if err := preBeaconStateBase.UnmarshalSSZ(preBeaconStateSSZ); err != nil {
		t.Fatalf("Failed to unmarshal: %v", err)
	}
	preBeaconState, err := state_native.InitializeFromProtoGloas(preBeaconStateBase)
	require.NoError(t, err)

	// If the post.ssz is not present, it means the test should fail on our end.
	postSSZFilepath, err := bazel.Runfile(path.Join(testFolderPath, "post.ssz_snappy"))
	postSSZExists := true
	if err != nil && strings.Contains(err.Error(), "could not locate file") {
		postSSZExists = false
	} else if err != nil {
		t.Fatal(err)
	}

	beaconState, err := operationFn(t, preBeaconState)
	if postSSZExists {
		require.NoError(t, err)

		postBeaconStateFile, err := os.ReadFile(postSSZFilepath) // #nosec G304
		require.NoError(t, err)
		postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
		require.NoError(t, err, "Failed to decompress")
		postBeaconState := &ethpb.BeaconStateGloas{}
		if err := postBeaconState.UnmarshalSSZ(postBeaconStateSSZ); err != nil {
			t.Fatalf("Failed to unmarshal: %v", err)
		}

		pbState, err := state_native.ProtobufBeaconStateGloas(beaconState.ToProtoUnsafe())
		require.NoError(t, err)
		if !proto.Equal(pbState, postBeaconState) {
			t.Log(cmp.Diff(postBeaconState, pbState, protocmp.Transform()))
			t.Fatal("Post state does not match expected")
		}
	} else {
		if err == nil {
			t.Fatal("Did not fail when expected")
		}
		t.Logf("Expected failure; failure reason = %v", err)
		return
	}
}
@@ -37,16 +37,8 @@ func (s *Server) AuthTokenInterceptor() grpc.UnaryServerInterceptor {
// AuthTokenHandler is an HTTP handler to authorize a route.
func (s *Server) AuthTokenHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
needsAuth := strings.Contains(path, api.WebApiUrlPrefix) || strings.Contains(path, api.KeymanagerApiPrefix)
// Protect direct (non-/api) web endpoints too; otherwise callers can bypass auth by hitting /v2/validator/*.
if strings.HasPrefix(path, api.WebUrlPrefix) &&
!strings.HasPrefix(path, api.WebUrlPrefix+"initialize") &&
!strings.HasPrefix(path, api.WebUrlPrefix+"health/") {
needsAuth = true
}

if needsAuth && !strings.Contains(path, api.SystemLogsPrefix) {
// if it's not initialize or has a web prefix
if (strings.Contains(r.URL.Path, api.WebApiUrlPrefix) || strings.Contains(r.URL.Path, api.KeymanagerApiPrefix)) && !strings.Contains(r.URL.Path, api.SystemLogsPrefix) {
// ignore some routes
reqToken := r.Header.Get("Authorization")
if reqToken == "" {

@@ -107,16 +107,6 @@ func TestServer_AuthTokenHandler(t *testing.T) {
require.NoError(t, json.Unmarshal(rr.Body.Bytes(), errJson))
require.StringContains(t, "Unauthorized", errJson.Message)
})
t.Run("direct /v2 endpoint also needs auth token (no /api bypass)", func(t *testing.T) {
rr := httptest.NewRecorder()
req, err := http.NewRequest(http.MethodGet, "/v2/validator/beacon/status", http.NoBody)
require.NoError(t, err)
testHandler.ServeHTTP(rr, req)
require.Equal(t, http.StatusUnauthorized, rr.Code)
errJson := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(rr.Body.Bytes(), errJson))
require.StringContains(t, "Unauthorized", errJson.Message)
})
t.Run("initialize does not need auth", func(t *testing.T) {
rr := httptest.NewRecorder()
req, err := http.NewRequest(http.MethodGet, api.WebUrlPrefix+"initialize", http.NoBody)