Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-24 04:38:07 -05:00)

Compare commits
15 Commits
blocker-fo ... hdiff_star
| Author | SHA1 | Date |
|---|---|---|
| | 3a5f1f3f23 | |
| | 006f1a9095 | |
| | bd630da3e6 | |
| | 1f118763d6 | |
| | 8378b161ce | |
| | e2d39c3420 | |
| | f34596964b | |
| | 56d03b4d5c | |
| | 68ff47db10 | |
| | 403d88b136 | |
| | b8a82885a7 | |
| | 432e4dfbbf | |
| | a71990bc43 | |
| | cb1d98b7dc | |
| | 8e5c982edb | |
@@ -21,7 +21,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",

@@ -4,7 +4,6 @@ import (
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -12,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
@@ -128,16 +126,7 @@ func processProposerSlashing(
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}

var err error
// [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within 2 epoch window
if beaconState.Version() >= version.Gloas {
err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
if err != nil {
return nil, err
}
}

beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
@@ -5,14 +5,12 @@ go_library(
srcs = [
"bid.go",
"pending_payment.go",
"proposer_slashing.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -31,7 +29,6 @@ go_test(
srcs = [
"bid_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -1,43 +0,0 @@
package gloas

import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)

// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
//     payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
//     state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
//     payment_index = slot % SLOTS_PER_EPOCH
//     state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

var paymentIndex primitives.Slot
if proposalEpoch == currentEpoch {
paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
} else if proposalEpoch+1 == currentEpoch {
paymentIndex = header.Slot % slotsPerEpoch
} else {
return nil
}

if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
return errors.Wrap(err, "could not clear builder pending payment")
}

return nil
}
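A minimal, self-contained sketch of the payment-index mapping used by the deleted helper above; SLOTS_PER_EPOCH = 32 and the sample slots are assumptions for illustration only and are not taken from the diff:

```go
package main

import "fmt"

// Illustrative only: map a proposal slot into the two-epoch
// builder_pending_payments window, mirroring the spec pseudocode above.
func main() {
	const slotsPerEpoch = 32 // assumption for the example, not looked up from Prysm config
	currentEpoch := uint64(5)

	for _, slot := range []uint64{5*slotsPerEpoch + 3, 4*slotsPerEpoch + 3, 3 * slotsPerEpoch} {
		proposalEpoch := slot / slotsPerEpoch
		switch {
		case proposalEpoch == currentEpoch:
			fmt.Printf("slot %d: current epoch, payment index %d\n", slot, slotsPerEpoch+slot%slotsPerEpoch)
		case proposalEpoch+1 == currentEpoch:
			fmt.Printf("slot %d: previous epoch, payment index %d\n", slot, slot%slotsPerEpoch)
		default:
			fmt.Printf("slot %d: outside the two-epoch window, nothing to clear\n", slot)
		}
	}
}
```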
@@ -1,112 +0,0 @@
package gloas

import (
"bytes"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch * 2

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)

setPendingPayment(t, st, paymentIndex, 123)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch + 7

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)

setPendingPayment(t, st, paymentIndex, 456)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
headerSlot := slotsPerEpoch * 2 // two epochs behind

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)

original := getPendingPayment(t, st, paymentIndex)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

after := getPendingPayment(t, st, paymentIndex)
require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}

func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
t.Helper()

slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentCount := int(slotsPerEpoch * 2)
payments := make([]*eth.BuilderPendingPayment, paymentCount)
for i := range payments {
payments[i] = &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
Amount: 1,
},
}
}

st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
Slot: slot,
BuilderPendingPayments: payments,
})
require.NoError(t, err)
return st
}

func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
t.Helper()

payment := &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
Amount: primitives.Gwei(amount),
},
}
require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}

func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
t.Helper()

stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)

return stateProto.BuilderPendingPayments[index]
}
@@ -71,7 +71,6 @@ go_test(
"state_test.go",
"trailing_slot_state_cache_test.go",
"transition_fuzz_test.go",
"transition_gloas_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
],
@@ -107,7 +106,6 @@ go_test(
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
); err != nil {
return nil, err
}

// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
return nil, err
}
}

return state, nil
}
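A small sketch of the bit arithmetic behind the availability update used in ProcessSlot above: the next slot's flag lives at bit (slot + 1) % SLOTS_PER_HISTORICAL_ROOT, which resolves to a byte index and a mask. SLOTS_PER_HISTORICAL_ROOT = 8192 is assumed here purely for illustration:

```go
package main

import "fmt"

// Illustrative only: locate and clear the availability bit for slot+1.
func main() {
	const slotsPerHistoricalRoot = 8192 // assumption for the example
	slot := uint64(10)

	idx := (slot + 1) % slotsPerHistoricalRoot
	byteIdx := idx / 8
	mask := byte(1) << (idx % 8)

	availability := make([]byte, slotsPerHistoricalRoot/8)
	availability[byteIdx] |= mask  // pretend the bit was previously set
	availability[byteIdx] &^= mask // slot processing clears it for the upcoming slot

	fmt.Printf("slot %d: cleared bit %d (byte %d, mask %#02x)\n", slot, idx, byteIdx, mask)
}
```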
@@ -1,141 +0,0 @@
package transition

import (
"bytes"
"context"
"fmt"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/stretchr/testify/require"
)

func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
slot := primitives.Slot(10)
cfg := params.BeaconConfig()
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := nextIdx / 8
bitMask := byte(1 << (nextIdx % 8))
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)

_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)

post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
}

func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
cfg := params.BeaconConfig()
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)

_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)

post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
}

func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
slot := primitives.Slot(7)
availability := make([]byte, 1)
st := newGloasState(t, slot, availability)

_, err := ProcessSlot(context.Background(), st)
cfg := params.BeaconConfig()
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := idx / 8
require.EqualError(t, err, fmt.Sprintf(
"bit index %d (byte index %d) out of range for execution payload availability length %d",
idx, byteIdx, len(availability),
))
}

func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
t.Helper()

cfg := params.BeaconConfig()
protoState := &ethpb.BeaconStateGloas{
Slot: slot,
LatestBlockHeader: testBeaconBlockHeader(),
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
PreviousEpochParticipation: []byte{},
CurrentEpochParticipation: []byte{},
JustificationBits: []byte{0},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentSyncCommittee: &ethpb.SyncCommittee{},
NextSyncCommittee: &ethpb.SyncCommittee{},
}

for i := range protoState.BlockRoots {
protoState.BlockRoots[i] = make([]byte, 32)
}
for i := range protoState.StateRoots {
protoState.StateRoots[i] = make([]byte, 32)
}
for i := range protoState.RandaoMixes {
protoState.RandaoMixes[i] = make([]byte, 32)
}

for i := range protoState.BuilderPendingPayments {
protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}

pubkeys := make([][]byte, cfg.SyncCommitteeSize)
for i := range pubkeys {
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}
protoState.NextSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}

st, err := state_native.InitializeFromProtoGloas(protoState)
require.NoError(t, err)
require.Equal(t, version.Gloas, st.Version())
return st
}

func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
return &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}
}
@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
if err != nil {
return nil, errors.Wrap(err, "data column sidecars file path open")
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
}
}()

// Read file metadata.
metadata, err := dcs.metadata(file)
@@ -22,6 +22,10 @@ var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")

// ErrStateDiffIncompatible is returned when state-diff feature is enabled
// but the database was created without state-diff support.
var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")

var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")

@@ -42,6 +42,10 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}

if err := s.initializeStateDiff(0, genesisState); err != nil {
return errors.Wrap(err, "failed to initialize state diff for genesis")
}
return nil
}
@@ -203,15 +203,45 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
return nil, err
}

if features.Get().EnableStateDiff {
sdCache, err := newStateDiffCache(kv)
if err != nil {
return nil, err
if err := kv.startStateDiff(ctx); err != nil {
if errors.Is(err, ErrStateDiffIncompatible) {
return kv, err
}
kv.stateDiffCache = sdCache
return nil, err
}
return kv, nil
}

func (kv *Store) startStateDiff(ctx context.Context) error {
if !features.Get().EnableStateDiff {
return nil
}
// Check if offset already exists (existing state-diff database).
hasOffset, err := kv.hasStateDiffOffset()
if err != nil {
return err
}

return kv, nil
if hasOffset {
// Existing state-diff database - restarts not yet supported.
return errors.New("restarting with existing state-diff database not yet supported")
}

// Check if this is a new database (no head block).
headBlock, err := kv.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get head block")
}

if headBlock == nil {
// New database - will be initialized later during checkpoint/genesis sync.
// stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
} else {
// Existing database without state-diff - return store with error for caller to handle.
return ErrStateDiffIncompatible
}
return nil
}

// ClearDB removes the previously stored database in the data directory.
@@ -9,11 +9,13 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/math"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
"go.etcd.io/bbolt"
)

@@ -122,6 +124,66 @@ func (s *Store) getOffset() uint64 {
return s.stateDiffCache.getOffset()
}

// hasStateDiffOffset checks if the state-diff offset has been set in the database.
// This is used to detect if an existing database has state-diff enabled.
func (s *Store) hasStateDiffOffset() (bool, error) {
var hasOffset bool
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return nil
}
hasOffset = bucket.Get(offsetKey) != nil
return nil
})
return hasOffset, err
}

// initializeStateDiff sets up the state-diff schema for a new database.
// This should be called during checkpoint sync or genesis sync.
func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
// Return early if the feature is not set
if !features.Get().EnableStateDiff {
return nil
}
// Only reinitialize if the offset is different
if s.stateDiffCache != nil {
if s.stateDiffCache.getOffset() == uint64(slot) {
log.WithField("offset", slot).Warning("Ignoring state diff cache reinitialization")
return nil
}
}
// Write offset directly to the database (without using cache which doesn't exist yet).
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}

offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
return bucket.Put(offsetKey, offsetBytes)
})
if err != nil {
return pkgerrors.Wrap(err, "failed to set offset")
}

// Create the state diff cache (this will read the offset from the database).
sdCache, err := newStateDiffCache(s)
if err != nil {
return pkgerrors.Wrap(err, "failed to create state diff cache")
}
s.stateDiffCache = sdCache

// Save the initial state as a full snapshot.
if err := s.saveFullSnapshot(initialState); err != nil {
return pkgerrors.Wrap(err, "failed to save initial snapshot")
}

log.WithField("offset", slot).Info("Initialized state-diff cache")
return nil
}

func keyForSnapshot(v int) ([]byte, error) {
switch v {
case version.Fulu:
@@ -110,6 +110,8 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
return errors.Wrap(err, "save finalized checkpoint")
}

if err := s.initializeStateDiff(state.Slot(), state); err != nil {
return errors.Wrap(err, "failed to initialize state diff")
}
return nil
}

@@ -540,7 +540,12 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(ctx, dbPath)
if err != nil {
if errors.Is(err, kv.ErrStateDiffIncompatible) {
log.WithError(err).Warn("Disabling state-diff feature")
cfg := features.Get()
cfg.EnableStateDiff = false
features.Init(cfg)
} else if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}
@@ -8,6 +8,7 @@ import (
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/OffchainLabs/prysm/v7/api"
@@ -25,6 +26,7 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -1042,27 +1044,112 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
defer span.End()

var err error
var root []byte
blockID := r.PathValue("block_id")
if blockID == "" {
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
return
}
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
if !shared.WriteBlockRootFetchError(w, err) {
return
switch blockID {
case "head":
root, err = s.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
return
}
if root == nil {
httputil.HandleError(w, "No head root was found", http.StatusNotFound)
return
}
case "finalized":
finalized := s.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := s.BeaconDB.GenesisBlock(ctx)
if err != nil {
httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
root = blkRoot[:]
default:
isHex := strings.HasPrefix(blockID, "0x")
if isHex {
blockIDBytes, err := hexutil.Decode(blockID)
if err != nil {
httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
return
}
if len(blockIDBytes) != fieldparams.RootLength {
httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
return
}
blockID32 := bytesutil.ToBytes32(blockIDBytes)
blk, err := s.BeaconDB.Block(ctx, blockID32)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
return
}
root = blockIDBytes
} else {
slot, err := strconv.ParseUint(blockID, 10, 64)
if err != nil {
httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
return
}
hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
return
}

if !hasRoots {
httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
return
}
root = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
return
}
if canonical {
root = blockRoot[:]
break
}
}
}
}

isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
b32Root := bytesutil.ToBytes32(root)
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
if err != nil {
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
return
}
response := &structs.BlockRootResponse{
Data: &structs.BlockRoot{
Root: hexutil.Encode(root[:]),
Root: hexutil.Encode(root),
},
ExecutionOptimistic: isOptimistic,
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
Finalized: s.FinalizationFetcher.IsFinalized(ctx, b32Root),
}
httputil.WriteJson(w, response)
}
@@ -2509,10 +2509,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}

root, err := genBlk.Block.HashTreeRoot()
@@ -2528,7 +2524,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "bad formatting",
blockID: map[string]string{"block_id": "3bad0"},
wantErr: "Invalid block ID",
wantErr: "Could not parse block ID",
wantCode: http.StatusBadRequest,
},
{
@@ -2576,7 +2572,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "non-existent root",
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
wantErr: "Block not found",
wantErr: "Could not find block",
wantCode: http.StatusNotFound,
},
{
@@ -2589,7 +2585,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "no block",
blockID: map[string]string{"block_id": "105"},
wantErr: "Block not found",
wantErr: "Could not find any blocks with given slot",
wantCode: http.StatusNotFound,
},
}
@@ -2637,10 +2633,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}

request := httptest.NewRequest(http.MethodGet, url, nil)
@@ -2676,10 +2668,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}
t.Run("true", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, url, nil)
@@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",

@@ -9,7 +9,6 @@ import (
"strings"

"github.com/OffchainLabs/prysm/v7/api/server/structs"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
@@ -182,16 +181,6 @@ func prepareConfigSpec() (map[string]any, error) {
data[tag] = convertValueForJSON(val, tag)
}

// Add Fulu preset values. These are compile-time constants from fieldparams,
// not runtime configs, but are required by the /eth/v1/config/spec API.
data["NUMBER_OF_COLUMNS"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "NUMBER_OF_COLUMNS")
data["CELLS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "CELLS_PER_EXT_BLOB")
data["FIELD_ELEMENTS_PER_CELL"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.CellsPerBlob)), "FIELD_ELEMENTS_PER_CELL")
data["FIELD_ELEMENTS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(config.FieldElementsPerBlob*2), "FIELD_ELEMENTS_PER_EXT_BLOB")
data["KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH"] = convertValueForJSON(reflect.ValueOf(uint64(4)), "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH")
// UPDATE_TIMEOUT is derived from SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
data["UPDATE_TIMEOUT"] = convertValueForJSON(reflect.ValueOf(uint64(config.SlotsPerEpoch)*uint64(config.EpochsPerSyncCommitteePeriod)), "UPDATE_TIMEOUT")

return data, nil
}
@@ -132,7 +132,7 @@ func TestGetSpec(t *testing.T) {
config.MinSyncCommitteeParticipants = 71
config.ProposerReorgCutoffBPS = primitives.BP(121)
config.AttestationDueBPS = primitives.BP(122)
config.AggregateDueBPS = primitives.BP(123)
config.AggregrateDueBPS = primitives.BP(123)
config.ContributionDueBPS = primitives.BP(124)
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
@@ -212,7 +212,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 186, len(data))
assert.Equal(t, 180, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -470,7 +470,7 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "121", v)
case "ATTESTATION_DUE_BPS":
assert.Equal(t, "122", v)
case "AGGREGATE_DUE_BPS":
case "AGGREGRATE_DUE_BPS":
assert.Equal(t, "123", v)
case "CONTRIBUTION_DUE_BPS":
assert.Equal(t, "124", v)
@@ -598,18 +598,6 @@ func TestGetSpec(t *testing.T) {
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)
assert.Equal(t, 2, len(blobSchedule))
case "FIELD_ELEMENTS_PER_CELL":
assert.Equal(t, "64", v) // From fieldparams.CellsPerBlob
case "FIELD_ELEMENTS_PER_EXT_BLOB":
assert.Equal(t, "198", v) // FieldElementsPerBlob (99) * 2
case "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "4", v) // Preset value
case "CELLS_PER_EXT_BLOB":
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
case "NUMBER_OF_COLUMNS":
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
case "UPDATE_TIMEOUT":
assert.Equal(t, "1782", v) // SlotsPerEpoch (27) * EpochsPerSyncCommitteePeriod (66)
default:
t.Errorf("Incorrect key: %s", k)
}
@@ -26,30 +26,21 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
}

// writeBlockIdError handles common block ID lookup errors.
// Returns true if an error was handled and written to the response, false if no error.
func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
if err == nil {
return false
}
// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
var blockNotFoundErr *lookup.BlockNotFoundError
if errors.As(err, &blockNotFoundErr) {
httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
return true
return false
}
var invalidBlockIdErr *lookup.BlockIdParseError
if errors.As(err, &invalidBlockIdErr) {
httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
return true
return false
}
httputil.HandleError(w, fallbackMsg+err.Error(), http.StatusInternalServerError)
return true
}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
if writeBlockIdError(w, err, "Could not get block from block ID: ") {
if err != nil {
httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
return false
}
if err = blocks.BeaconBlockIsNil(blk); err != nil {
@@ -58,10 +49,3 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
}
return true
}

// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block root.
// Returns true if no error occurred, false otherwise.
func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
return !writeBlockIdError(w, err, "Could not get block root from block ID: ")
}
@@ -105,59 +105,3 @@ func TestWriteBlockFetchError(t *testing.T) {
})
}
}

// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
// to ensure that the correct error message and code are written to the response
// and that the function returns the correct boolean value.
func TestWriteBlockRootFetchError(t *testing.T) {
cases := []struct {
name string
err error
expectedMessage string
expectedCode int
expectedReturn bool
}{
{
name: "Nil error should return true",
err: nil,
expectedReturn: true,
},
{
name: "BlockNotFoundError should return 404",
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
expectedMessage: "Block not found",
expectedCode: http.StatusNotFound,
expectedReturn: false,
},
{
name: "BlockIdParseError should return 400",
err: &lookup.BlockIdParseError{},
expectedMessage: "Invalid block ID",
expectedCode: http.StatusBadRequest,
expectedReturn: false,
},
{
name: "Generic error should return 500",
err: errors.New("database connection failed"),
expectedMessage: "Could not get block root from block ID",
expectedCode: http.StatusInternalServerError,
expectedReturn: false,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
writer := httptest.NewRecorder()
result := WriteBlockRootFetchError(writer, c.err)

assert.Equal(t, c.expectedReturn, result, "incorrect return value")
if !c.expectedReturn {
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")

e := &httputil.DefaultJsonError{}
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
}
})
}
}
@@ -60,7 +60,6 @@ func (e BlockIdParseError) Error() string {
// Blocker is responsible for retrieving blocks.
type Blocker interface {
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
@@ -226,19 +225,6 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
return blk, nil
}

// BlockRoot returns the block root for a given identifier. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
// - "justified"
// - <slot>
// - <hex encoded block root with '0x' prefix>
// - <block root>
func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
root, _, err := p.resolveBlockID(ctx, string(id))
return root, err
}

// blobsContext holds common information needed for blob retrieval
type blobsContext struct {
root [fieldparams.RootLength]byte
@@ -168,111 +168,6 @@ func TestGetBlock(t *testing.T) {
}
}

func TestBlockRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()

genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)

for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]

wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)

fetcher := &BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: &mockChain.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[64].BlockRoot},
CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[32].BlockRoot},
CanonicalRoots: canonicalRoots,
},
}

genesisRoot, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)

tests := []struct {
name string
blockID []byte
want [32]byte
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: bytesutil.ToBytes32(headBlock.BlockRoot),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
},
{
name: "justified",
blockID: []byte("justified"),
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genesisRoot,
},
{
name: "genesis root",
blockID: genesisRoot[:],
want: genesisRoot,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
},
{
name: "hex root",
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block at slot",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := fetcher.BlockRoot(ctx, tt.blockID)
if tt.wantErr {
assert.NotEqual(t, err, nil, "no error has been returned")
return
}
require.NoError(t, err)
assert.DeepEqual(t, tt.want, result)
})
}
}

func TestBlobsErrorHandling(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -15,7 +15,6 @@ import (
// MockBlocker is a fake implementation of lookup.Blocker.
type MockBlocker struct {
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
RootToReturn [32]byte
ErrorToReturn error
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
@@ -40,14 +39,6 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
}

// BlockRoot --
func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
if m.ErrorToReturn != nil {
return [32]byte{}, m.ErrorToReturn
}
return m.RootToReturn, nil
}

// BlobSidecars --
func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
return nil, &core.RpcError{}
@@ -9,10 +9,8 @@ import (
type writeOnlyGloasFields interface {
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
ClearBuilderPendingPayment(index primitives.Slot) error
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
}

type readOnlyGloasFields interface {
@@ -27,7 +27,7 @@ func (b *BeaconState) RotateBuilderPendingPayments() error {
copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])

for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
b.builderPendingPayments[i] = emptyBuilderPendingPayment
b.builderPendingPayments[i] = emptyPayment()
}

b.markFieldAsDirty(types.BuilderPendingPayments)
@@ -35,13 +35,6 @@ func (b *BeaconState) RotateBuilderPendingPayments() error {
return nil
}

// emptyBuilderPendingPayment is a shared zero-value payment used to clear entries.
var emptyBuilderPendingPayment = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}

// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
// If the withdrawals slice is shared, it copies the slice first to preserve references.
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
@@ -69,6 +62,17 @@ func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.Build
return nil
}

func emptyPayment() *ethpb.BuilderPendingPayment {
return &ethpb.BuilderPendingPayment{
Weight: 0,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: 0,
BuilderIndex: 0,
},
}
}

// SetExecutionPayloadBid sets the latest execution payload bid in the state.
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
if b.version < version.Gloas {
@@ -102,25 +106,6 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
return nil
}

// ClearBuilderPendingPayment clears a builder pending payment at the specified index.
func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
if b.version < version.Gloas {
return errNotSupported("ClearBuilderPendingPayment", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

if uint64(index) >= uint64(len(b.builderPendingPayments)) {
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
}

b.builderPendingPayments[index] = emptyBuilderPendingPayment

b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}

// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
if b.version < version.Gloas {
@@ -139,25 +124,3 @@ func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *e
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}

// UpdateExecutionPayloadAvailabilityAtIndex updates the execution payload availability bit at a specific index.
func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error {
b.lock.Lock()
defer b.lock.Unlock()

byteIndex := idx / 8
bitIndex := idx % 8

if byteIndex >= uint64(len(b.executionPayloadAvailability)) {
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", idx, byteIndex, len(b.executionPayloadAvailability))
}

if val != 0 {
b.executionPayloadAvailability[byteIndex] |= (1 << bitIndex)
} else {
b.executionPayloadAvailability[byteIndex] &^= (1 << bitIndex)
}

b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
@@ -141,46 +141,6 @@ func TestSetBuilderPendingPayment(t *testing.T) {
})
}

func TestClearBuilderPendingPayment(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.ClearBuilderPendingPayment(0)
require.ErrorContains(t, "is not supported", err)
})

t.Run("clears and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
}
st.builderPendingPayments[1] = &ethpb.BuilderPendingPayment{
Weight: 2,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
Amount: 99,
BuilderIndex: 1,
},
}

require.NoError(t, st.ClearBuilderPendingPayment(1))
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
})

t.Run("returns error on out of range index", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
}

err := st.ClearBuilderPendingPayment(2)

require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
})
}

func TestRotateBuilderPendingPayments(t *testing.T) {
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
@@ -287,44 +247,3 @@ func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
}

func TestUpdateExecutionPayloadAvailabilityAtIndex_SetAndClear(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))

otherIdx := uint64(8) // byte 1, bit 0
idx := uint64(9) // byte 1, bit 1

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(otherIdx, 1))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1))
require.Equal(t, byte(0x03), st.executionPayloadAvailability[1])

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 0))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
}

func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))

idx := uint64(len(st.executionPayloadAvailability)) * 8
err := st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1)
require.ErrorContains(t, "out of range", err)

for _, b := range st.executionPayloadAvailability {
if b != 0 {
t.Fatalf("execution payload availability mutated on error")
}
}
}

func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
t.Helper()

st, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
ExecutionPayloadAvailability: availability,
})
require.NoError(t, err)

return st.(*BeaconState)
}
@@ -137,7 +137,7 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
if ctx.Err() != nil {
return ctx.Err()
}
_, lvl, err := s.beaconDB.SlotInDiffTree(slot)
offset, lvl, err := s.beaconDB.SlotInDiffTree(slot)
if err != nil {
log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
continue
@@ -145,6 +145,9 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
if lvl == -1 {
continue
}
if uint64(slot) == offset {
continue
}
// The state needs to be saved.
// Try the epoch boundary cache first.
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
@@ -1,3 +0,0 @@
### Ignored

- Add `NewBeaconStateGloas()`.
@@ -1,3 +0,0 @@
### Added

- Flag `--log.vmodule` to set per-package verbosity levels for logging.
@@ -1,2 +0,0 @@
### Fixed
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.
@@ -1,3 +0,0 @@
### Ignored

- optmizing /eth/v1/beacon/blocks/{block_id}/root endpoint by reusing blocker lookup instead of duplicated logic.
@@ -1,3 +0,0 @@
### Ignored

- delayed head evaluator check to mid epoch for e2e.
@@ -1,3 +0,0 @@
### Ignored

- updating phase 0 constants for ethspecify
@@ -1,2 +0,0 @@
### Removed
- `--disable-get-blobs-v2` flag from help.
changelog/potuz_hdiff_start_db.md (new file)
@@ -0,0 +1,3 @@
### Added

- Initialize db with state-diff feature flag.
@@ -1,2 +0,0 @@
### Added
- Close opened file in data_column.go
@@ -1,3 +0,0 @@
### Added

- Added missing beacon config in fulu so that the presets don't go missing in /eth/v1/config/spec beacon api.
@@ -1,2 +0,0 @@
### Added
- Implement modified proposer slashing for gloas
@@ -1,3 +0,0 @@
### Added

- Add slot processing with execution payload availability updates
@@ -42,7 +42,6 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
@@ -364,8 +364,7 @@ var (
	}
	// DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
	DisableGetBlobsV2 = &cli.BoolFlag{
		Name:   "disable-get-blobs-v2",
		Usage:  "Disables the engine_getBlobsV2 usage.",
		Hidden: true,
		Name:  "disable-get-blobs-v2",
		Usage: "Disables the engine_getBlobsV2 usage.",
	}
)
@@ -8,7 +8,6 @@ import (
	"os"
	"path/filepath"
	runtimeDebug "runtime/debug"
	"strings"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/node"
@@ -114,7 +113,6 @@ var appFlags = []cli.Flag{
	cmd.PubsubQueueSize,
	cmd.DataDirFlag,
	cmd.VerbosityFlag,
	cmd.LogVModuleFlag,
	cmd.EnableTracingFlag,
	cmd.TracingProcessNameFlag,
	cmd.TracingEndpointFlag,
@@ -160,6 +158,7 @@ var appFlags = []cli.Flag{
	dasFlags.BackfillOldestSlot,
	dasFlags.BlobRetentionEpochFlag,
	flags.BatchVerifierLimit,
	flags.StateDiffExponents,
	flags.DisableEphemeralLogFile,
}
@@ -173,22 +172,13 @@ func before(ctx *cli.Context) error {
		return errors.Wrap(err, "failed to load flags from config file")
	}

	// determine default log verbosity
	// determine log verbosity
	verbosity := ctx.String(cmd.VerbosityFlag.Name)
	verbosityLevel, err := logrus.ParseLevel(verbosity)
	if err != nil {
		return errors.Wrap(err, "failed to parse log verbosity")
	}

	// determine per package verbosity. if not set, maxLevel will be 0.
	vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
	vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
	if err != nil {
		return errors.Wrap(err, "failed to parse log vmodule")
	}

	// set the global logging level to allow for the highest verbosity requested
	logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
	logs.SetLoggingLevel(verbosityLevel)

	format := ctx.String(cmd.LogFormat.Name)
	switch format {
@@ -202,13 +192,11 @@ func before(ctx *cli.Context) error {
		formatter.FullTimestamp = true
		formatter.ForceFormatting = true
		formatter.ForceColors = true
		formatter.VModule = vmodule
		formatter.BaseVerbosity = verbosityLevel

		logrus.AddHook(&logs.WriterHook{
			Formatter:     formatter,
			Writer:        os.Stderr,
			AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
			AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
		})
	case "fluentd":
		f := joonix.NewFormatter()
@@ -232,7 +220,7 @@ func before(ctx *cli.Context) error {

	logFileName := ctx.String(cmd.LogFileName.Name)
	if logFileName != "" {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}
@@ -74,6 +74,7 @@ var appHelpFlagGroups = []flagGroup{
			flags.RPCHost,
			flags.RPCPort,
			flags.BatchVerifierLimit,
			flags.StateDiffExponents,
		},
	},
	{
@@ -169,6 +170,7 @@ var appHelpFlagGroups = []flagGroup{
			flags.ExecutionJWTSecretFlag,
			flags.JwtId,
			flags.InteropMockEth1DataVotesFlag,
			flags.DisableGetBlobsV2,
		},
	},
	{ // Flags relevant to configuring beacon chain monitoring.
@@ -200,7 +202,6 @@ var appHelpFlagGroups = []flagGroup{
			cmd.LogFileName,
			cmd.VerbosityFlag,
			flags.DisableEphemeralLogFile,
			cmd.LogVModuleFlag,
		},
	},
	{ // Feature flags.
@@ -86,7 +86,7 @@ func main() {

	logFileName := ctx.String(cmd.LogFileName.Name)
	if logFileName != "" {
		if err := logs.ConfigurePersistentLogging(logFileName, format, level, map[string]logrus.Level{}); err != nil {
		if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}
@@ -36,11 +36,6 @@ var (
		Usage: "Logging verbosity. (trace, debug, info, warn, error, fatal, panic)",
		Value: "info",
	}
	// LogVModuleFlag defines per-package log levels.
	LogVModuleFlag = &cli.StringSliceFlag{
		Name:  "log.vmodule",
		Usage: "Per-package log verbosity. packagePath=level entries separated by commas.",
	}
	// DataDirFlag defines a path on disk where Prysm databases are stored.
	DataDirFlag = &cli.StringFlag{
		Name: "datadir",
@@ -6,7 +6,6 @@ import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)
@@ -238,41 +237,3 @@ func TestValidateNoArgs_SubcommandFlags(t *testing.T) {
	err = app.Run([]string{"command", "bar", "subComm2", "--barfoo100", "garbage", "subComm4"})
	require.ErrorContains(t, "unrecognized argument: garbage", err)
}

func TestParseVModule(t *testing.T) {
	t.Run("valid", func(t *testing.T) {
		input := "beacon-chain/p2p=error, beacon-chain/light-client=trace"
		parsed, maxL, err := ParseVModule(input)
		require.NoError(t, err)
		require.Equal(t, logrus.ErrorLevel, parsed["beacon-chain/p2p"])
		require.Equal(t, logrus.TraceLevel, parsed["beacon-chain/light-client"])
		require.Equal(t, logrus.TraceLevel, maxL)
	})

	t.Run("empty", func(t *testing.T) {
		parsed, maxL, err := ParseVModule(" ")
		require.NoError(t, err)
		require.IsNil(t, parsed)
		require.Equal(t, logrus.PanicLevel, maxL)
	})

	t.Run("invalid", func(t *testing.T) {
		tests := []string{
			"beacon-chain/p2p",
			"beacon-chain/p2p=",
			"beacon-chain/p2p/=error",
			"=info",
			"beacon-chain/*=info",
			"beacon-chain/p2p=meow",
			"/beacon-chain/p2p=info",
			"beacon-chain/../p2p=info",
			"beacon-chain/p2p=info,",
			"beacon-chain/p2p=info,beacon-chain/p2p=debug",
		}
		for _, input := range tests {
			_, maxL, err := ParseVModule(input)
			require.ErrorContains(t, "invalid", err)
			require.Equal(t, logrus.PanicLevel, maxL)
		}
	})
}
@@ -4,7 +4,6 @@ import (
	"bufio"
	"fmt"
	"os"
	"path"
	"runtime"
	"strings"
@@ -103,63 +102,3 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
	}
	return nil
}

// ParseVModule parses a comma-separated list of package=level entries.
func ParseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
	var l logrus.Level
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return nil, l, nil
	}

	parts := strings.Split(trimmed, ",")
	result := make(map[string]logrus.Level, len(parts))
	for _, raw := range parts {
		entry := strings.TrimSpace(raw)
		if entry == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry: empty segment")
		}
		kv := strings.Split(entry, "=")
		if len(kv) != 2 {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: expected path=level", entry)
		}
		pkg := strings.TrimSpace(kv[0])
		levelText := strings.TrimSpace(kv[1])
		if pkg == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: empty package path", entry)
		}
		if levelText == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: empty level", entry)
		}
		if strings.Contains(pkg, "*") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: wildcards are not allowed", pkg)
		}
		if strings.ContainsAny(pkg, " \t\n") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: whitespace is not allowed", pkg)
		}
		if strings.HasPrefix(pkg, "/") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: leading slash is not allowed", pkg)
		}
		cleaned := path.Clean(pkg)
		if cleaned != pkg || pkg == "." || pkg == ".." {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: must be an absolute package path. (trailing slash not allowed)", pkg)
		}
		if _, exists := result[pkg]; exists {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: duplicate entry", pkg)
		}
		level, err := logrus.ParseLevel(levelText)
		if err != nil {
			return nil, l, fmt.Errorf("invalid vmodule level %q: must be one of panic, fatal, error, warn, info, debug, trace", levelText)
		}
		result[pkg] = level
	}

	maxLevel := logrus.PanicLevel
	for _, lvl := range result {
		if lvl > maxLevel {
			maxLevel = lvl
		}
	}

	return result, maxLevel, nil
}
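For reviewers, a small sketch of how the removed parser was driven; the input string is the one used by the removed TestParseVModule further up, and the snippet itself is illustrative rather than part of the diff. This is what the removed --log.vmodule flag fed into before()/main() in the two binaries.

	package main

	import (
		"fmt"
		"log"

		"github.com/OffchainLabs/prysm/v7/cmd"
	)

	func main() {
		// Same input as the removed "valid" test case above.
		vmodule, maxLevel, err := cmd.ParseVModule("beacon-chain/p2p=error, beacon-chain/light-client=trace")
		if err != nil {
			// Wildcards, leading slashes, duplicate packages, and unknown levels are all rejected.
			log.Fatal(err)
		}
		fmt.Println(vmodule["beacon-chain/p2p"])          // error
		fmt.Println(vmodule["beacon-chain/light-client"]) // trace
		fmt.Println(maxLevel)                             // trace, the most verbose per-package level requested
	}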
@@ -9,7 +9,6 @@ import (
	"os"
	"path/filepath"
	runtimeDebug "runtime/debug"
	"strings"

	"github.com/OffchainLabs/prysm/v7/cmd"
	accountcommands "github.com/OffchainLabs/prysm/v7/cmd/validator/accounts"
@@ -96,7 +95,6 @@ var appFlags = []cli.Flag{
	cmd.MinimalConfigFlag,
	cmd.E2EConfigFlag,
	cmd.VerbosityFlag,
	cmd.LogVModuleFlag,
	cmd.DataDirFlag,
	cmd.ClearDB,
	cmd.ForceClearDB,
@@ -152,22 +150,13 @@ func main() {
		return err
	}

	// determine default log verbosity
	// determine log verbosity
	verbosity := ctx.String(cmd.VerbosityFlag.Name)
	verbosityLevel, err := logrus.ParseLevel(verbosity)
	if err != nil {
		return errors.Wrap(err, "failed to parse log verbosity")
	}

	// determine per package verbosity. if not set, maxLevel will be 0.
	vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
	vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
	if err != nil {
		return errors.Wrap(err, "failed to parse log vmodule")
	}

	// set the global logging level to allow for the highest verbosity requested
	logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
	logs.SetLoggingLevel(verbosityLevel)

	logFileName := ctx.String(cmd.LogFileName.Name)

@@ -183,13 +172,11 @@ func main() {
		formatter.FullTimestamp = true
		formatter.ForceFormatting = true
		formatter.ForceColors = true
		formatter.VModule = vmodule
		formatter.BaseVerbosity = verbosityLevel

		logrus.AddHook(&logs.WriterHook{
			Formatter:     formatter,
			Writer:        os.Stderr,
			AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
			AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
		})
	case "fluentd":
		f := joonix.NewFormatter()
@@ -210,7 +197,7 @@ func main() {
	}

	if logFileName != "" {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}
@@ -76,7 +76,6 @@ var appHelpFlagGroups = []flagGroup{
			cmd.AcceptTosFlag,
			cmd.ApiTimeoutFlag,
			flags.DisableEphemeralLogFile,
			cmd.LogVModuleFlag,
		},
	},
	{
@@ -280,6 +280,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
	DisableQUIC,
	EnableDiscoveryReboot,
	enableExperimentalAttestationPool,
	EnableStateDiff,
	forceHeadFlag,
	blacklistRoots,
	enableHashtree,
@@ -88,7 +88,7 @@ type BeaconChainConfig struct {
	IntervalsPerSlot uint64 `yaml:"INTERVALS_PER_SLOT"` // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.
	ProposerReorgCutoffBPS primitives.BP `yaml:"PROPOSER_REORG_CUTOFF_BPS" spec:"true"` // ProposerReorgCutoffBPS defines the proposer reorg deadline in basis points of the slot.
	AttestationDueBPS primitives.BP `yaml:"ATTESTATION_DUE_BPS" spec:"true"` // AttestationDueBPS defines the attestation due time in basis points of the slot.
	AggregateDueBPS primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"` // AggregateDueBPS defines the aggregate due time in basis points of the slot.
	AggregrateDueBPS primitives.BP `yaml:"AGGREGRATE_DUE_BPS" spec:"true"` // AggregrateDueBPS defines the aggregate due time in basis points of the slot.
	SyncMessageDueBPS primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"` // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
	ContributionDueBPS primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"` // ContributionDueBPS defines the contribution due time in basis points of the slot.
@@ -243,7 +243,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
		fmt.Sprintf("MAX_BLOBS_PER_BLOCK: %d", cfg.DeprecatedMaxBlobsPerBlock),
		fmt.Sprintf("PROPOSER_REORG_CUTOFF_BPS: %d", cfg.ProposerReorgCutoffBPS),
		fmt.Sprintf("ATTESTATION_DUE_BPS: %d", cfg.AttestationDueBPS),
		fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
		fmt.Sprintf("AGGREGRATE_DUE_BPS: %d", cfg.AggregrateDueBPS),
		fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
		fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
	}
@@ -24,6 +24,7 @@ import (
// These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
	"AGGREGATE_DUE_BPS",
	"AGGREGATE_DUE_BPS_GLOAS",
	"ATTESTATION_DEADLINE",
	"ATTESTATION_DUE_BPS_GLOAS",
@@ -98,7 +99,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
	assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
	assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
	assert.Equal(t, expected.AttestationDueBPS, actual.AttestationDueBPS, "%s: AttestationDueBPS", name)
	assert.Equal(t, expected.AggregateDueBPS, actual.AggregateDueBPS, "%s: AggregateDueBPS", name)
	assert.Equal(t, expected.AggregrateDueBPS, actual.AggregrateDueBPS, "%s: AggregrateDueBPS", name)
	assert.Equal(t, expected.ContributionDueBPS, actual.ContributionDueBPS, "%s: ContributionDueBPS", name)
	assert.Equal(t, expected.ProposerReorgCutoffBPS, actual.ProposerReorgCutoffBPS, "%s: ProposerReorgCutoffBPS", name)
	assert.Equal(t, expected.SyncMessageDueBPS, actual.SyncMessageDueBPS, "%s: SyncMessageDueBPS", name)
@@ -123,7 +123,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	// Time-based protocol parameters.
	ProposerReorgCutoffBPS: primitives.BP(1667),
	AttestationDueBPS:      primitives.BP(3333),
	AggregateDueBPS:        primitives.BP(6667),
	AggregrateDueBPS:       primitives.BP(6667),
	SyncMessageDueBPS:      primitives.BP(3333),
	ContributionDueBPS:     primitives.BP(6667),
@@ -30,7 +30,7 @@ func addLogWriter(w io.Writer) {
}

// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
	logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
	if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
		return err
@@ -47,13 +47,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
		return nil
	}

	maxVmoduleLevel := logrus.PanicLevel
	for _, level := range vmodule {
		if level > maxVmoduleLevel {
			maxVmoduleLevel = level
		}
	}

	// Create formatter and writer hook
	formatter := new(prefixed.TextFormatter)
	formatter.TimestampFormat = "2006-01-02 15:04:05.00"
@@ -61,13 +54,11 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
	// If persistent log files are written - we disable the log messages coloring because
	// the colors are ANSI codes and seen as gibberish in the log files.
	formatter.DisableColors = true
	formatter.BaseVerbosity = lvl
	formatter.VModule = vmodule

	logrus.AddHook(&WriterHook{
		Formatter:     formatter,
		Writer:        f,
		AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
		AllowedLevels: logrus.AllLevels[:lvl+1],
	})

	logrus.Info("File logging initialized")
@@ -28,20 +28,20 @@ func TestMaskCredentialsLogging(t *testing.T) {
	}
}

func TestConfigurePersistentLogging(t *testing.T) {
func TestConfigurePersistantLogging(t *testing.T) {
	testParentDir := t.TempDir()

	// 1. Test creation of file in an existing parent directory
	logFileName := "test.log"
	existingDirectory := "test-1-existing-testing-dir"

	err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
	require.NoError(t, err)

	// 2. Test creation of file along with parent directory
	nonExistingDirectory := "test-2-non-existing-testing-dir"

	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
	require.NoError(t, err)

	// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
@@ -52,7 +52,7 @@ func TestConfigurePersistentLogging(t *testing.T) {
		return
	}

	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
	require.NoError(t, err)

	//4. Create log file in a directory without 700 permissions
@@ -120,13 +120,6 @@ type TextFormatter struct {

	// Timestamp format to use for display when a full timestamp is printed.
	TimestampFormat string

	// VModule overrides the allowed log level for exact package paths.
	// When using VModule, you should also set BaseVerbosity to the default verbosity level provided by the user.
	VModule map[string]logrus.Level

	// BaseVerbosity is the default verbosity level for all packages.
	BaseVerbosity logrus.Level
}

func getCompiledColor(main string, fallback string) func(string) string {
@@ -175,11 +168,6 @@ func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	// check for vmodule compatibility
	if !f.shouldLog(entry) {
		return []byte{}, nil
	}

	var b *bytes.Buffer
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
@@ -248,39 +236,6 @@ func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	return b.Bytes(), nil
}

func (f *TextFormatter) shouldLog(entry *logrus.Entry) bool {
	if len(f.VModule) == 0 {
		return true
	}
	packagePath, ok := entry.Data["package"]
	if !ok {
		return entry.Level <= f.BaseVerbosity
	}
	packageName, ok := packagePath.(string)
	if !ok {
		return entry.Level <= f.BaseVerbosity
	}

	packageLevel := f.bestMatchLevel(packageName)

	return entry.Level <= packageLevel
}

// bestMatchLevel returns the level of the most specific path that matches package name.
func (f *TextFormatter) bestMatchLevel(pkg string) logrus.Level {
	bestLen := 0
	bestLevel := f.BaseVerbosity
	for k, v := range f.VModule {
		if k == pkg || strings.HasPrefix(pkg, k+"/") {
			if len(k) > bestLen {
				bestLen = len(k)
				bestLevel = v
			}
		}
	}
	return bestLevel
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) (err error) {
	var levelColor func(string) string
	var levelText string
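To make the removed routing rule above concrete, here is a self-contained restatement of bestMatchLevel's longest-prefix matching. It mirrors the removed method rather than calling it, so it is illustrative only:

	package main

	import (
		"fmt"
		"strings"

		"github.com/sirupsen/logrus"
	)

	// bestMatch mirrors the removed (*TextFormatter).bestMatchLevel: the longest VModule key that
	// equals the package path or is a "/"-delimited prefix of it wins; otherwise the base verbosity applies.
	func bestMatch(vmodule map[string]logrus.Level, base logrus.Level, pkg string) logrus.Level {
		bestLen, bestLevel := 0, base
		for k, v := range vmodule {
			if k == pkg || strings.HasPrefix(pkg, k+"/") {
				if len(k) > bestLen {
					bestLen, bestLevel = len(k), v
				}
			}
		}
		return bestLevel
	}

	func main() {
		vmodule := map[string]logrus.Level{"beacon-chain/p2p": logrus.DebugLevel}
		fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/p2p/encoder")) // debug: prefix match
		fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/sync"))        // info: falls back to base
	}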
@@ -13,6 +13,10 @@ specrefs:

exceptions:
  presets:
    # Not implemented
    - CELLS_PER_EXT_BLOB#fulu
    - UPDATE_TIMEOUT#altair

    # Not implemented: gloas (future fork)
    - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
    - MAX_PAYLOAD_ATTESTATIONS#gloas
@@ -61,6 +65,14 @@ exceptions:
    - PTC_TIMELINESS_INDEX#gloas

  configs:
    # Not implemented (placeholders)
    - AGGREGATE_DUE_BPS#phase0
    - ATTESTATION_DUE_BPS#phase0
    - CONTRIBUTION_DUE_BPS#altair
    - PROPOSER_REORG_CUTOFF_BPS#phase0
    - SLOT_DURATION_MS#phase0
    - SYNC_MESSAGE_DUE_BPS#altair

    # Not implemented: gloas (future fork)
    - AGGREGATE_DUE_BPS_GLOAS#gloas
    - ATTESTATION_DUE_BPS_GLOAS#gloas
@@ -411,8 +423,10 @@ exceptions:
    - update_proposer_boost_root#phase0

  presets:
    - CELLS_PER_EXT_BLOB#fulu
    - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
    - BUILDER_REGISTRY_LIMIT#gloas
    - MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
    - MAX_PAYLOAD_ATTESTATIONS#gloas
    - PTC_SIZE#gloas
    - UPDATE_TIMEOUT#altair
@@ -1,8 +1,5 @@
- name: AGGREGATE_DUE_BPS
  sources:
    - file: config/params/config.go
      search: AggregateDueBPS\s+primitives.BP
      regex: true
  sources: []
  spec: |
    <spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
    AGGREGATE_DUE_BPS: uint64 = 6667
@@ -29,10 +26,7 @@
    </spec>

- name: ATTESTATION_DUE_BPS
  sources:
    - file: config/params/config.go
      search: AttestationDueBPS\s+primitives.BP
      regex: true
  sources: []
  spec: |
    <spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
    ATTESTATION_DUE_BPS: uint64 = 3333
@@ -178,10 +172,7 @@
    </spec>

- name: CONTRIBUTION_DUE_BPS
  sources:
    - file: config/params/config.go
      search: ContributionDueBPS\s+primitives.BP
      regex: true
  sources: []
  spec: |
    <spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
    CONTRIBUTION_DUE_BPS: uint64 = 6667
@@ -558,10 +549,7 @@
    </spec>

- name: PROPOSER_REORG_CUTOFF_BPS
  sources:
    - file: config/params/config.go
      search: ProposerReorgCutoffBPS\s+primitives.BP
      regex: true
  sources: []
  spec: |
    <spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
    PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
@@ -648,10 +636,7 @@
    </spec>

- name: SLOT_DURATION_MS
  sources:
    - file: config/params/config.go
      search: SlotDurationMilliseconds\s+uint64
      regex: true
  sources: []
  spec: |
    <spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
    SLOT_DURATION_MS: uint64 = 12000
@@ -668,10 +653,7 @@
    </spec>

- name: SYNC_MESSAGE_DUE_BPS
  sources:
    - file: config/params/config.go
      search: SyncMessageDueBPS\s+primitives.BP
      regex: true
  sources: []
  spec: |
    <spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
    SYNC_MESSAGE_DUE_BPS: uint64 = 3333
@@ -19,10 +19,7 @@
    </spec>

- name: CELLS_PER_EXT_BLOB
  sources:
    - file: beacon-chain/rpc/eth/config/handlers.go
      search: data\["CELLS_PER_EXT_BLOB"\]
      regex: true
  sources: []
  spec: |
    <spec preset_var="CELLS_PER_EXT_BLOB" fork="fulu" hash="217075f3">
    CELLS_PER_EXT_BLOB = 128
@@ -688,16 +685,6 @@
    TARGET_COMMITTEE_SIZE: uint64 = 128
    </spec>

- name: UPDATE_TIMEOUT
  sources:
    - file: beacon-chain/rpc/eth/config/handlers.go
      search: data\["UPDATE_TIMEOUT"\]
      regex: true
  spec: |
    <spec preset_var="UPDATE_TIMEOUT" fork="altair" hash="e633d57e">
    UPDATE_TIMEOUT = 8192
    </spec>

- name: VALIDATOR_REGISTRY_LIMIT
  sources:
    - file: config/fieldparams/mainnet.go
@@ -97,6 +97,7 @@ go_test(
        "endtoend_setup_test.go",
        "endtoend_test.go",
        "minimal_e2e_test.go",
        "minimal_hdiff_e2e_test.go",
        "minimal_slashing_e2e_test.go",
        "slasher_simulator_e2e_test.go",
    ],
@@ -296,7 +296,7 @@ func (r *testRunner) waitForMatchingHead(ctx context.Context, timeout time.Durat
}

func (r *testRunner) testCheckpointSync(ctx context.Context, g *errgroup.Group, i int, conns []*grpc.ClientConn, bnAPI, enr, minerEnr string) error {
	matchTimeout := 5 * time.Minute
	matchTimeout := 3 * time.Minute
	ethNode := eth1.NewNode(i, minerEnr)
	g.Go(func() error {
		return ethNode.Start(ctx)
@@ -10,7 +10,6 @@ import (
	"net/http"
	"time"

	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	e2e "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
@@ -129,42 +128,8 @@ func finishedSyncing(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
	return nil
}

// waitForMidEpoch waits until we're at least halfway into the current epoch
// and 3/4 into the current slot. This prevents race conditions at epoch
// boundaries and slot boundaries where different nodes may report different heads.
func waitForMidEpoch(conn *grpc.ClientConn) error {
	beaconClient := eth.NewBeaconChainClient(conn)
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
	midEpochSlot := slotsPerEpoch / 2

	for {
		chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
		if err != nil {
			return err
		}
		slotInEpoch := chainHead.HeadSlot % slotsPerEpoch
		// If we're at least halfway into the epoch, we're safe
		if slotInEpoch >= midEpochSlot {
			// Wait 3/4 into the slot to ensure block propagation
			time.Sleep(time.Duration(secondsPerSlot) * time.Second * 3 / 4)
			return nil
		}
		// Wait for the remaining slots until mid-epoch
		slotsToWait := midEpochSlot - slotInEpoch
		time.Sleep(time.Duration(slotsToWait) * time.Duration(secondsPerSlot) * time.Second)
	}
}
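As a worked example of the removed wait logic, using mainnet-style numbers purely for illustration (32 slots per epoch, 12-second slots): midEpochSlot is 16, so a head at slot 5 of the epoch sleeps 11 slots (132s) and re-checks, while a head at slot 20 is already past the midpoint and only sleeps 3/4 of a slot (9s) before returning.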
func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
	// Wait until we're at least halfway into the epoch to avoid race conditions
	// at epoch boundaries where nodes may report different epochs.
	if err := waitForMidEpoch(conns[0]); err != nil {
		return errors.Wrap(err, "failed waiting for mid-epoch")
	}

	headEpochs := make([]primitives.Epoch, len(conns))
	headBlockRoots := make([][]byte, len(conns))
	justifiedRoots := make([][]byte, len(conns))
	prevJustifiedRoots := make([][]byte, len(conns))
	finalizedRoots := make([][]byte, len(conns))
@@ -181,7 +146,6 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
			return errors.Wrapf(err, "connection number=%d", conIdx)
		}
		headEpochs[conIdx] = chainHead.HeadEpoch
		headBlockRoots[conIdx] = chainHead.HeadBlockRoot
		justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
		prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
		finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
@@ -202,14 +166,6 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
				headEpochs[i],
			)
		}
		if !bytes.Equal(headBlockRoots[0], headBlockRoots[i]) {
			return fmt.Errorf(
				"received conflicting head block roots on node %d, expected %#x, received %#x",
				i,
				headBlockRoots[0],
				headBlockRoots[i],
			)
		}
		if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
			return fmt.Errorf(
				"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
testing/endtoend/minimal_hdiff_e2e_test.go (Normal file, 16 lines)
@@ -0,0 +1,16 @@
package endtoend

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
)

func TestEndToEnd_MinimalConfig_WithStateDiff(t *testing.T) {
	r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()),
		types.WithStateDiff(),
	)
	r.run()
}
@@ -76,6 +76,15 @@ func WithSSZOnly() E2EConfigOpt {
	}
}

func WithStateDiff() E2EConfigOpt {
	return func(cfg *E2EConfig) {
		cfg.BeaconFlags = append(cfg.BeaconFlags,
			"--enable-state-diff",
			"--state-diff-exponents=6,5", // Small exponents for quick testing
		)
	}
}

// WithExitEpoch sets a custom epoch for voluntary exit submission.
// This affects ProposeVoluntaryExit, ValidatorsHaveExited, SubmitWithdrawal, and ValidatorsHaveWithdrawn evaluators.
func WithExitEpoch(e primitives.Epoch) E2EConfigOpt {
@@ -202,8 +202,6 @@ go_test(
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__epoch_processing__process_builder_pending_payments_test.go",
        "gloas__operations__execution_payload_header_test.go",
        "gloas__operations__proposer_slashing_test.go",
        "gloas__sanity__slots_test.go",
        "gloas__ssz_static__ssz_static_test.go",
        "phase0__epoch_processing__effective_balance_updates_test.go",
        "phase0__epoch_processing__epoch_processing_test.go",
@@ -284,7 +282,6 @@ go_test(
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/epoch_processing:go_default_library",
        "//testing/spectest/shared/gloas/operations:go_default_library",
        "//testing/spectest/shared/gloas/sanity:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
@@ -1,11 +0,0 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_ProposerSlashing(t *testing.T) {
	operations.RunProposerSlashingTest(t, "mainnet")
}
@@ -1,11 +0,0 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
)

func TestMainnet_Gloas_Sanity_Slots(t *testing.T) {
	sanity.RunSlotProcessingTests(t, "mainnet")
}
@@ -208,8 +208,6 @@ go_test(
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__epoch_processing__process_builder_pending_payments_test.go",
        "gloas__operations__execution_payload_bid_test.go",
        "gloas__operations__proposer_slashing_test.go",
        "gloas__sanity__slots_test.go",
        "gloas__ssz_static__ssz_static_test.go",
        "phase0__epoch_processing__effective_balance_updates_test.go",
        "phase0__epoch_processing__epoch_processing_test.go",
@@ -294,7 +292,6 @@ go_test(
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/epoch_processing:go_default_library",
        "//testing/spectest/shared/gloas/operations:go_default_library",
        "//testing/spectest/shared/gloas/sanity:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
@@ -1,11 +0,0 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_ProposerSlashing(t *testing.T) {
	operations.RunProposerSlashingTest(t, "minimal")
}
@@ -1,11 +0,0 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
)

func TestMinimal_Gloas_Sanity_Slots(t *testing.T) {
	sanity.RunSlotProcessingTests(t, "minimal")
}
@@ -6,7 +6,6 @@ go_library(
    srcs = [
        "execution_payload_bid.go",
        "helpers.go",
        "proposer_slashing.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
    visibility = ["//visibility:public"],
@@ -3,8 +3,6 @@ package operations
import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

@@ -15,19 +13,3 @@ func sszToState(b []byte) (state.BeaconState, error) {
	}
	return state_native.InitializeFromProtoGloas(base)
}

func sszToBlock(b []byte) (interfaces.SignedBeaconBlock, error) {
	base := &ethpb.BeaconBlockGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{Block: base})
}

func sszToBlockBody(b []byte) (interfaces.ReadOnlyBeaconBlockBody, error) {
	base := &ethpb.BeaconBlockBodyGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return blocks.NewBeaconBlockBody(base)
}
@@ -1,28 +0,0 @@
package operations

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
)

func blockWithProposerSlashing(ssz []byte) (interfaces.SignedBeaconBlock, error) {
	ps := &ethpb.ProposerSlashing{}
	if err := ps.UnmarshalSSZ(ssz); err != nil {
		return nil, err
	}
	b := &ethpb.SignedBeaconBlockGloas{
		Block: &ethpb.BeaconBlockGloas{
			Body: &ethpb.BeaconBlockBodyGloas{ProposerSlashings: []*ethpb.ProposerSlashing{ps}},
		},
	}
	return blocks.NewSignedBeaconBlock(b)
}

func RunProposerSlashingTest(t *testing.T, config string) {
	common.RunProposerSlashingTest(t, config, version.String(version.Gloas), blockWithProposerSlashing, sszToState)
}
@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = ["slot_processing.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity",
    visibility = ["//testing/spectest:__subpackages__"],
    deps = [
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/spectest/utils:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -1,61 +0,0 @@
package sanity

import (
	"context"
	"strconv"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	"github.com/golang/snappy"
	"google.golang.org/protobuf/proto"
)

func init() {
	transition.SkipSlotCache.Disable()
}

// RunSlotProcessingTests executes "sanity/slots" tests.
func RunSlotProcessingTests(t *testing.T, config string) {
	require.NoError(t, utils.SetConfig(t, config))

	testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "sanity/slots/pyspec_tests")

	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			preBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz_snappy")
			require.NoError(t, err)
			preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
			require.NoError(t, err, "Failed to decompress")
			base := &ethpb.BeaconStateGloas{}
			require.NoError(t, base.UnmarshalSSZ(preBeaconStateSSZ), "Failed to unmarshal")
			beaconState, err := state_native.InitializeFromProtoGloas(base)
			require.NoError(t, err)

			file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "slots.yaml")
			require.NoError(t, err)
			fileStr := string(file)
			slotsCount, err := strconv.ParseUint(fileStr[:len(fileStr)-5], 10, 64)
			require.NoError(t, err)

			postBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "post.ssz_snappy")
			require.NoError(t, err)
			postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
			require.NoError(t, err, "Failed to decompress")
			postBeaconState := &ethpb.BeaconStateGloas{}
			require.NoError(t, postBeaconState.UnmarshalSSZ(postBeaconStateSSZ), "Failed to unmarshal")
			postState, err := transition.ProcessSlots(context.Background(), beaconState, beaconState.Slot().Add(slotsCount))
			require.NoError(t, err)

			pbState, err := state_native.ProtobufBeaconStateGloas(postState.ToProto())
			require.NoError(t, err)
			if !proto.Equal(pbState, postBeaconState) {
				t.Fatal("Did not receive expected post state")
			}
		})
	}
}
@@ -112,7 +112,7 @@ func NewBeaconState(options ...NewBeaconStateOption) (state.BeaconState, error)
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateAltair creates a beacon state with minimum marshalable fields.
@@ -167,7 +167,7 @@ func NewBeaconStateAltair(options ...func(state *ethpb.BeaconStateAltair) error)
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateBellatrix creates a beacon state with minimum marshalable fields.
@@ -234,7 +234,7 @@ func NewBeaconStateBellatrix(options ...func(state *ethpb.BeaconStateBellatrix)
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateCapella creates a beacon state with minimum marshalable fields.
@@ -302,7 +302,7 @@ func NewBeaconStateCapella(options ...func(state *ethpb.BeaconStateCapella) erro
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateDeneb creates a beacon state with minimum marshalable fields.
@@ -370,7 +370,7 @@ func NewBeaconStateDeneb(options ...func(state *ethpb.BeaconStateDeneb) error) (
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateElectra creates a beacon state with minimum marshalable fields.
@@ -438,7 +438,7 @@ func NewBeaconStateElectra(options ...func(state *ethpb.BeaconStateElectra) erro
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// NewBeaconStateFulu creates a beacon state with minimum marshalable fields.
@@ -507,86 +507,7 @@ func NewBeaconStateFulu(options ...func(state *ethpb.BeaconStateFulu) error) (st
		return nil, err
	}

	return st, nil
}

// NewBeaconStateGloas creates a beacon state with minimum marshalable fields.
func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (state.BeaconState, error) {
	pubkeys := make([][]byte, 512)
	for i := range pubkeys {
		pubkeys[i] = make([]byte, 48)
	}

	builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
	for i := range builderPendingPayments {
		builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		}
	}

	seed := &ethpb.BeaconStateGloas{
		BlockRoots:                   filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
		StateRoots:                   filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
		Slashings:                    make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		RandaoMixes:                  filledByteSlice2D(uint64(params.BeaconConfig().EpochsPerHistoricalVector), 32),
		Validators:                   make([]*ethpb.Validator, 0),
		CurrentJustifiedCheckpoint:   &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		Eth1Data: &ethpb.Eth1Data{
			DepositRoot: make([]byte, fieldparams.RootLength),
			BlockHash:   make([]byte, 32),
		},
		Fork: &ethpb.Fork{
			PreviousVersion: make([]byte, 4),
			CurrentVersion:  make([]byte, 4),
		},
		Eth1DataVotes:               make([]*ethpb.Eth1Data, 0),
		HistoricalRoots:             make([][]byte, 0),
		JustificationBits:           bitfield.Bitvector4{0x0},
		FinalizedCheckpoint:         &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		LatestBlockHeader:           HydrateBeaconHeader(&ethpb.BeaconBlockHeader{}),
		PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		PreviousEpochParticipation:  make([]byte, 0),
		CurrentEpochParticipation:   make([]byte, 0),
		CurrentSyncCommittee: &ethpb.SyncCommittee{
			Pubkeys:         pubkeys,
			AggregatePubkey: make([]byte, 48),
		},
		NextSyncCommittee: &ethpb.SyncCommittee{
			Pubkeys:         pubkeys,
			AggregatePubkey: make([]byte, 48),
		},
		ProposerLookahead: make([]uint64, 64),
		LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
			ParentBlockHash:        make([]byte, 32),
			ParentBlockRoot:        make([]byte, 32),
			BlockHash:              make([]byte, 32),
			PrevRandao:             make([]byte, 32),
			FeeRecipient:           make([]byte, 20),
			BlobKzgCommitmentsRoot: make([]byte, 32),
		},
		Builders:                     make([]*ethpb.Builder, 0),
		ExecutionPayloadAvailability: make([]byte, 1024),
		BuilderPendingPayments:       builderPendingPayments,
		BuilderPendingWithdrawals:    make([]*ethpb.BuilderPendingWithdrawal, 0),
		LatestBlockHash:              make([]byte, 32),
		PayloadExpectedWithdrawals:   make([]*enginev1.Withdrawal, 0),
	}

	for _, opt := range options {
		err := opt(seed)
		if err != nil {
			return nil, err
		}
	}

	var st, err = state_native.InitializeFromProtoUnsafeGloas(seed)
	if err != nil {
		return nil, err
	}

	return st, nil
	return st.Copy(), nil
}

// SSZ will fill 2D byte slices with their respective values, so we must fill these in too for round
@@ -68,26 +68,6 @@ func TestNewBeaconStateElectra(t *testing.T) {
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateFulu(t *testing.T) {
	st, err := NewBeaconStateFulu()
	require.NoError(t, err)
	b, err := st.MarshalSSZ()
	require.NoError(t, err)
	got := &ethpb.BeaconStateFulu{}
	require.NoError(t, got.UnmarshalSSZ(b))
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateGloas(t *testing.T) {
	st, err := NewBeaconStateGloas()
	require.NoError(t, err)
	b, err := st.MarshalSSZ()
	require.NoError(t, err)
	got := &ethpb.BeaconStateGloas{}
	require.NoError(t, got.UnmarshalSSZ(b))
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconState_HashTreeRoot(t *testing.T) {
	st, err := NewBeaconState()
	require.NoError(t, err)
@@ -71,7 +71,7 @@ func main() {
	flag.Parse()

	if *logFileName != "" {
		if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel, map[string]logrus.Level{}); err != nil {
		if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}
@@ -207,7 +207,7 @@ func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fiel
// such that any attestations from this slot have time to reach the beacon node
// before creating the aggregated attestation.
func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slot) {
	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregateDueBPS)
	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregrateDueBPS)
}

// This returns the signature of validator signing over aggregate and
@@ -257,7 +257,7 @@ func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) {
	numOfSlots := primitives.Slot(4)
	slotDuration := params.BeaconConfig().SlotDuration()
	validator.genesisTime = currentTime.Add(-slotDuration * time.Duration(numOfSlots))
	timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregateDueBPS)
	timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregrateDueBPS)

	twoThirdTime := currentTime.Add(timeToSleep)
	validator.waitToSlotTwoThirds(t.Context(), numOfSlots)
@@ -51,7 +51,7 @@ func (v *validator) slotComponentSpanName(component primitives.BP) string {
	switch component {
	case cfg.AttestationDueBPS:
		return "validator.waitAttestationWindow"
	case cfg.AggregateDueBPS:
	case cfg.AggregrateDueBPS:
		return "validator.waitAggregateWindow"
	case cfg.SyncMessageDueBPS:
		return "validator.waitSyncMessageWindow"
@@ -47,7 +47,7 @@ func TestSlotComponentSpanName(t *testing.T) {
		},
		{
			name:      "aggregate",
			component: cfg.AggregateDueBPS,
			component: cfg.AggregrateDueBPS,
			expected:  "validator.waitAggregateWindow",
		},
		{