Compare commits


1 Commit

Author: Manu NALEPA
SHA1: 4abfea99ce
Message: Remove KZG batch verification functionality.
Date: 2026-01-09 10:34:36 +01:00
60 changed files with 834 additions and 2337 deletions

View File

@@ -26,7 +26,6 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -213,12 +212,8 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
if err != nil {
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
}
if features.Get().LowValcountSweep {
bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex += primitives.ValidatorIndex(bound)
} else {
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
}
bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex += primitives.ValidatorIndex(bound)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
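A minimal sketch of the sweep-index advance after this change: the low-validator-count feature flag is gone, and the bound min(validator count, MaxValidatorsPerWithdrawalsSweep) now always applies before wrapping modulo the validator count. The function and values below are illustrative, not Prysm code.

```go
package main

import "fmt"

// nextSweepIndex mirrors the updated logic above: advance the withdrawal
// sweep cursor by the bounded sweep size and wrap around the registry.
func nextSweepIndex(current, numValidators, maxPerSweep uint64) uint64 {
	bound := min(numValidators, maxPerSweep) // bound always applied now (no feature flag)
	return (current + bound) % numValidators
}

func main() {
	// With 100 validators and a 16384-validator sweep cap, the sweep covers
	// the whole registry and the cursor wraps back to where it started.
	fmt.Println(nextSweepIndex(42, 100, 16384)) // prints 42
}
```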

View File

@@ -102,7 +102,6 @@ go_test(
"getters_test.go",
"getters_validator_test.go",
"getters_withdrawal_test.go",
"gloas_test.go",
"hasher_test.go",
"mvslice_fuzz_test.go",
"proofs_test.go",
@@ -157,7 +156,6 @@ go_test(
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
],

View File

@@ -72,13 +72,11 @@ type BeaconState struct {
// Gloas fields
latestExecutionPayloadBid *ethpb.ExecutionPayloadBid
builders []*ethpb.Builder
nextWithdrawalBuilderIndex primitives.BuilderIndex
executionPayloadAvailability []byte
builderPendingPayments []*ethpb.BuilderPendingPayment
builderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal
latestBlockHash []byte
payloadExpectedWithdrawals []*enginev1.Withdrawal
latestWithdrawalsRoot []byte
id uint64
lock sync.RWMutex
@@ -136,13 +134,11 @@ type beaconStateMarshalable struct {
PendingConsolidations []*ethpb.PendingConsolidation `json:"pending_consolidations" yaml:"pending_consolidations"`
ProposerLookahead []primitives.ValidatorIndex `json:"proposer_look_ahead" yaml:"proposer_look_ahead"`
LatestExecutionPayloadBid *ethpb.ExecutionPayloadBid `json:"latest_execution_payload_bid" yaml:"latest_execution_payload_bid"`
Builders []*ethpb.Builder `json:"builders" yaml:"builders"`
NextWithdrawalBuilderIndex primitives.BuilderIndex `json:"next_withdrawal_builder_index" yaml:"next_withdrawal_builder_index"`
ExecutionPayloadAvailability []byte `json:"execution_payload_availability" yaml:"execution_payload_availability"`
BuilderPendingPayments []*ethpb.BuilderPendingPayment `json:"builder_pending_payments" yaml:"builder_pending_payments"`
BuilderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal `json:"builder_pending_withdrawals" yaml:"builder_pending_withdrawals"`
LatestBlockHash []byte `json:"latest_block_hash" yaml:"latest_block_hash"`
PayloadExpectedWithdrawals []*enginev1.Withdrawal `json:"payload_expected_withdrawals" yaml:"payload_expected_withdrawals"`
LatestWithdrawalsRoot []byte `json:"latest_withdrawals_root" yaml:"latest_withdrawals_root"`
}
func (b *BeaconState) MarshalJSON() ([]byte, error) {
@@ -198,13 +194,11 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
PendingConsolidations: b.pendingConsolidations,
ProposerLookahead: b.proposerLookahead,
LatestExecutionPayloadBid: b.latestExecutionPayloadBid,
Builders: b.builders,
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
ExecutionPayloadAvailability: b.executionPayloadAvailability,
BuilderPendingPayments: b.builderPendingPayments,
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
LatestBlockHash: b.latestBlockHash,
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawals,
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
}
return json.Marshal(marshalable)
}

View File

@@ -305,12 +305,10 @@ func (b *BeaconState) ToProtoUnsafe() any {
PendingConsolidations: b.pendingConsolidations,
ProposerLookahead: lookahead,
ExecutionPayloadAvailability: b.executionPayloadAvailability,
Builders: b.builders,
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
BuilderPendingPayments: b.builderPendingPayments,
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
LatestBlockHash: b.latestBlockHash,
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawals,
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
}
default:
return nil
@@ -609,12 +607,10 @@ func (b *BeaconState) ToProto() any {
PendingConsolidations: b.pendingConsolidationsVal(),
ProposerLookahead: lookahead,
ExecutionPayloadAvailability: b.executionPayloadAvailabilityVal(),
Builders: b.buildersVal(),
NextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
BuilderPendingPayments: b.builderPendingPaymentsVal(),
BuilderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
LatestBlockHash: b.latestBlockHashVal(),
PayloadExpectedWithdrawals: b.payloadExpectedWithdrawalsVal(),
LatestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
}
default:
return nil

View File

@@ -1,7 +1,6 @@
package state_native
import (
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -48,22 +47,6 @@ func (b *BeaconState) builderPendingWithdrawalsVal() []*ethpb.BuilderPendingWith
return withdrawals
}
// buildersVal returns a copy of the builders registry.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) buildersVal() []*ethpb.Builder {
if b.builders == nil {
return nil
}
builders := make([]*ethpb.Builder, len(b.builders))
for i := range builders {
builder := b.builders[i]
builders[i] = ethpb.CopyBuilder(builder)
}
return builders
}
// latestBlockHashVal returns a copy of the latest block hash.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) latestBlockHashVal() []byte {
@@ -77,17 +60,15 @@ func (b *BeaconState) latestBlockHashVal() []byte {
return hash
}
// payloadExpectedWithdrawalsVal returns a copy of the payload expected withdrawals.
// latestWithdrawalsRootVal returns a copy of the latest withdrawals root.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) payloadExpectedWithdrawalsVal() []*enginev1.Withdrawal {
if b.payloadExpectedWithdrawals == nil {
func (b *BeaconState) latestWithdrawalsRootVal() []byte {
if b.latestWithdrawalsRoot == nil {
return nil
}
withdrawals := make([]*enginev1.Withdrawal, len(b.payloadExpectedWithdrawals))
for i, withdrawal := range b.payloadExpectedWithdrawals {
withdrawals[i] = withdrawal.Copy()
}
root := make([]byte, len(b.latestWithdrawalsRoot))
copy(root, b.latestWithdrawalsRoot)
return withdrawals
return root
}

View File

@@ -1,43 +0,0 @@
package state_native
import (
"testing"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/stretchr/testify/require"
)
func TestBuildersVal(t *testing.T) {
st := &BeaconState{}
require.Nil(t, st.buildersVal())
st.builders = []*ethpb.Builder{
{Pubkey: []byte{0x01}, ExecutionAddress: []byte{0x02}, Balance: 3},
nil,
}
got := st.buildersVal()
require.Len(t, got, 2)
require.Nil(t, got[1])
require.Equal(t, st.builders[0], got[0])
require.NotSame(t, st.builders[0], got[0])
}
func TestPayloadExpectedWithdrawalsVal(t *testing.T) {
st := &BeaconState{}
require.Nil(t, st.payloadExpectedWithdrawalsVal())
st.payloadExpectedWithdrawals = []*enginev1.Withdrawal{
{Index: 1, ValidatorIndex: 2, Address: []byte{0x03}, Amount: 4},
nil,
}
got := st.payloadExpectedWithdrawalsVal()
require.Len(t, got, 2)
require.Nil(t, got[1])
require.Equal(t, st.payloadExpectedWithdrawals[0], got[0])
require.NotSame(t, st.payloadExpectedWithdrawals[0], got[0])
}

View File

@@ -342,15 +342,6 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
}
if state.version >= version.Gloas {
buildersRoot, err := stateutil.BuildersRoot(state.builders)
if err != nil {
return nil, errors.Wrap(err, "could not compute builders merkleization")
}
fieldRoots[types.Builders.RealPosition()] = buildersRoot[:]
nextWithdrawalBuilderIndexRoot := ssz.Uint64Root(uint64(state.nextWithdrawalBuilderIndex))
fieldRoots[types.NextWithdrawalBuilderIndex.RealPosition()] = nextWithdrawalBuilderIndexRoot[:]
epaRoot, err := stateutil.ExecutionPayloadAvailabilityRoot(state.executionPayloadAvailability)
if err != nil {
return nil, errors.Wrap(err, "could not compute execution payload availability merkleization")
@@ -375,12 +366,8 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
lbhRoot := bytesutil.ToBytes32(state.latestBlockHash)
fieldRoots[types.LatestBlockHash.RealPosition()] = lbhRoot[:]
expectedWithdrawalsRoot, err := ssz.WithdrawalSliceRoot(state.payloadExpectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, errors.Wrap(err, "could not compute payload expected withdrawals root")
}
fieldRoots[types.PayloadExpectedWithdrawals.RealPosition()] = expectedWithdrawalsRoot[:]
lwrRoot := bytesutil.ToBytes32(state.latestWithdrawalsRoot)
fieldRoots[types.LatestWithdrawalsRoot.RealPosition()] = lwrRoot[:]
}
return fieldRoots, nil
}

View File

@@ -120,13 +120,11 @@ var (
)
gloasAdditionalFields = []types.FieldIndex{
types.Builders,
types.NextWithdrawalBuilderIndex,
types.ExecutionPayloadAvailability,
types.BuilderPendingPayments,
types.BuilderPendingWithdrawals,
types.LatestBlockHash,
types.PayloadExpectedWithdrawals,
types.LatestWithdrawalsRoot,
}
gloasFields = slices.Concat(
@@ -147,7 +145,7 @@ const (
denebSharedFieldRefCount = 7
electraSharedFieldRefCount = 10
fuluSharedFieldRefCount = 11
gloasSharedFieldRefCount = 13 // Adds Builders + BuilderPendingWithdrawals to the shared-ref set and LatestExecutionPayloadHeader is removed
gloasSharedFieldRefCount = 12 // Adds BuilderPendingWithdrawals to the shared-ref set and LatestExecutionPayloadHeader is removed
)
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
@@ -819,13 +817,11 @@ func InitializeFromProtoUnsafeGloas(st *ethpb.BeaconStateGloas) (state.BeaconSta
pendingConsolidations: st.PendingConsolidations,
proposerLookahead: proposerLookahead,
latestExecutionPayloadBid: st.LatestExecutionPayloadBid,
builders: st.Builders,
nextWithdrawalBuilderIndex: st.NextWithdrawalBuilderIndex,
executionPayloadAvailability: st.ExecutionPayloadAvailability,
builderPendingPayments: st.BuilderPendingPayments,
builderPendingWithdrawals: st.BuilderPendingWithdrawals,
latestBlockHash: st.LatestBlockHash,
payloadExpectedWithdrawals: st.PayloadExpectedWithdrawals,
latestWithdrawalsRoot: st.LatestWithdrawalsRoot,
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
@@ -865,7 +861,6 @@ func InitializeFromProtoUnsafeGloas(st *ethpb.BeaconStateGloas) (state.BeaconSta
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1)
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1) // New in Gloas.
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1) // New in Gloas.
state.Count.Inc()
@@ -937,7 +932,6 @@ func (b *BeaconState) Copy() state.BeaconState {
pendingDeposits: b.pendingDeposits,
pendingPartialWithdrawals: b.pendingPartialWithdrawals,
pendingConsolidations: b.pendingConsolidations,
builders: b.builders,
// Everything else, too small to be concerned about, constant size.
genesisValidatorsRoot: b.genesisValidatorsRoot,
@@ -954,12 +948,11 @@ func (b *BeaconState) Copy() state.BeaconState {
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella.Copy(),
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDeneb.Copy(),
latestExecutionPayloadBid: b.latestExecutionPayloadBid.Copy(),
nextWithdrawalBuilderIndex: b.nextWithdrawalBuilderIndex,
executionPayloadAvailability: b.executionPayloadAvailabilityVal(),
builderPendingPayments: b.builderPendingPaymentsVal(),
builderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
latestBlockHash: b.latestBlockHashVal(),
payloadExpectedWithdrawals: b.payloadExpectedWithdrawalsVal(),
latestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
id: types.Enumerator.Inc(),
@@ -1335,10 +1328,6 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
return stateutil.ProposerLookaheadRoot(b.proposerLookahead)
case types.LatestExecutionPayloadBid:
return b.latestExecutionPayloadBid.HashTreeRoot()
case types.Builders:
return stateutil.BuildersRoot(b.builders)
case types.NextWithdrawalBuilderIndex:
return ssz.Uint64Root(uint64(b.nextWithdrawalBuilderIndex)), nil
case types.ExecutionPayloadAvailability:
return stateutil.ExecutionPayloadAvailabilityRoot(b.executionPayloadAvailability)
@@ -1348,8 +1337,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
return stateutil.BuilderPendingWithdrawalsRoot(b.builderPendingWithdrawals)
case types.LatestBlockHash:
return bytesutil.ToBytes32(b.latestBlockHash), nil
case types.PayloadExpectedWithdrawals:
return ssz.WithdrawalSliceRoot(b.payloadExpectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
case types.LatestWithdrawalsRoot:
return bytesutil.ToBytes32(b.latestWithdrawalsRoot), nil
}
return [32]byte{}, errors.New("invalid field index provided")
}

View File

@@ -116,10 +116,6 @@ func (f FieldIndex) String() string {
return "pendingConsolidations"
case ProposerLookahead:
return "proposerLookahead"
case Builders:
return "builders"
case NextWithdrawalBuilderIndex:
return "nextWithdrawalBuilderIndex"
case ExecutionPayloadAvailability:
return "executionPayloadAvailability"
case BuilderPendingPayments:
@@ -128,8 +124,8 @@ func (f FieldIndex) String() string {
return "builderPendingWithdrawals"
case LatestBlockHash:
return "latestBlockHash"
case PayloadExpectedWithdrawals:
return "payloadExpectedWithdrawals"
case LatestWithdrawalsRoot:
return "latestWithdrawalsRoot"
default:
return fmt.Sprintf("unknown field index number: %d", f)
}
@@ -215,20 +211,16 @@ func (f FieldIndex) RealPosition() int {
return 36
case ProposerLookahead:
return 37
case Builders:
return 38
case NextWithdrawalBuilderIndex:
return 39
case ExecutionPayloadAvailability:
return 40
return 38
case BuilderPendingPayments:
return 41
return 39
case BuilderPendingWithdrawals:
return 42
return 40
case LatestBlockHash:
return 43
case PayloadExpectedWithdrawals:
return 44
return 41
case LatestWithdrawalsRoot:
return 42
default:
return -1
}
@@ -295,13 +287,11 @@ const (
PendingPartialWithdrawals // Electra: EIP-7251
PendingConsolidations // Electra: EIP-7251
ProposerLookahead // Fulu: EIP-7917
Builders // Gloas: EIP-7732
NextWithdrawalBuilderIndex // Gloas: EIP-7732
ExecutionPayloadAvailability // Gloas: EIP-7732
BuilderPendingPayments // Gloas: EIP-7732
BuilderPendingWithdrawals // Gloas: EIP-7732
LatestBlockHash // Gloas: EIP-7732
PayloadExpectedWithdrawals // Gloas: EIP-7732
LatestWithdrawalsRoot // Gloas: EIP-7732
)
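For reference, a sketch of the Gloas field positions implied by the RealPosition switch above once Builders, NextWithdrawalBuilderIndex, and PayloadExpectedWithdrawals are dropped. The package and map below are illustrative, not part of the codebase.

```go
package statefields

// Gloas-only state fields and their RealPosition values after this change.
var gloasFieldPositions = map[string]int{
	"executionPayloadAvailability": 38,
	"builderPendingPayments":       39,
	"builderPendingWithdrawals":    40,
	"latestBlockHash":              41,
	"latestWithdrawalsRoot":        42,
}
```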
// Enumerator keeps track of the number of states created since the node's start.

View File

@@ -6,7 +6,6 @@ go_library(
"block_header_root.go",
"builder_pending_payments_root.go",
"builder_pending_withdrawals_root.go",
"builders_root.go",
"eth1_root.go",
"execution_payload_availability_root.go",
"field_root_attestation.go",

View File

@@ -1,12 +0,0 @@
package stateutil
import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// BuildersRoot computes the SSZ root of a slice of Builder.
func BuildersRoot(slice []*ethpb.Builder) ([32]byte, error) {
return ssz.SliceRoot(slice, uint64(fieldparams.BuilderRegistryLimit))
}

View File

@@ -4,9 +4,6 @@ import (
"context"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -56,32 +53,6 @@ func (s *Service) verifierRoutine() {
}
}
// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
for {
kzgBatch := make([]*kzgVerifier, 0, 1)
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
}
for {
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
continue
default:
verifyKzgBatch(kzgBatch)
}
break
}
}
}
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
@@ -154,71 +125,3 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
return aggSet, nil
}
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
defer span.End()
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
resChan := make(chan error, 1)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err()
}
select {
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
case err := <-resChan:
if err != nil {
log.WithError(err).Trace("Could not perform batch verification")
tracing.AnnotateError(span, err)
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
}
}
return pubsub.ValidationAccept, nil
}
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
defer span.End()
start := time.Now()
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
err = errors.Wrap(err, "could not verify")
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
return pubsub.ValidationAccept, nil
}
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
if len(kzgBatch) == 0 {
return
}
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
for _, kzgVerifier := range kzgBatch {
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
}
var verificationErr error
start := time.Now()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
} else {
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
}
// Send the same result to all verifiers in the batch
for _, verifier := range kzgBatch {
verifier.resChan <- verificationErr
}
}
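For context, the routine removed above followed a common drain-and-batch channel pattern: block until one request arrives, then opportunistically drain everything already queued and process the whole batch at once. A minimal, generic sketch of that pattern (types and names are illustrative, not the removed Prysm code):

```go
package main

import (
	"context"
	"fmt"
)

type request struct{ id int }

// batcher blocks for one request, drains any already-queued requests,
// then hands the whole batch to process — the shape of the removed
// kzgVerifierRoutine/verifyKzgBatch pair.
func batcher(ctx context.Context, ch <-chan request, process func([]request)) {
	for {
		batch := make([]request, 0, 1)
		select {
		case <-ctx.Done():
			return
		case r := <-ch:
			batch = append(batch, r)
		}
	drain:
		for {
			select {
			case <-ctx.Done():
				return
			case r := <-ch:
				batch = append(batch, r)
			default:
				break drain
			}
		}
		process(batch)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ch := make(chan request, 8)
	for i := 0; i < 3; i++ {
		ch <- request{id: i}
	}
	done := make(chan struct{})
	go batcher(ctx, ch, func(b []request) {
		fmt.Println("batch size:", len(b)) // prints 3: one blocking receive plus two drained
		close(done)
	})
	<-done
}
```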

View File

@@ -1,339 +1,14 @@
package sync
import (
"context"
"sync"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
func TestValidateWithKzgBatchVerifier(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
tests := []struct {
name string
dataColumns []blocks.RODataColumn
expectedResult pubsub.ValidationResult
expectError bool
}{
{
name: "single valid data column",
dataColumns: createValidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "multiple valid data columns",
dataColumns: createValidTestDataColumns(t, 3),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "single invalid data column",
dataColumns: createInvalidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationReject,
expectError: true,
},
{
name: "empty data column slice",
dataColumns: []blocks.RODataColumn{},
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)
require.Equal(t, tt.expectedResult, result)
if tt.expectError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestVerifierRoutine(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("processes single request", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
})
t.Run("batches multiple requests", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
const numRequests = 5
resChans := make([]chan error, numRequests)
for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
}
for i := range numRequests {
select {
case err := <-resChans[i]:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
}
})
t.Run("context cancellation stops routine", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
routineDone := make(chan struct{})
go func() {
service.kzgVerifierRoutine()
close(routineDone)
}()
cancel()
select {
case <-routineDone:
case <-time.After(time.Second):
t.Fatal("timeout waiting for routine to exit")
}
})
}
func TestVerifyKzgBatch(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("all valid data columns succeed", func(t *testing.T) {
dataColumns := createValidTestDataColumns(t, 3)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})
t.Run("invalid proofs fail entire batch", func(t *testing.T) {
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
allColumns := append(validColumns, invalidColumns...)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
assert.NotNil(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})
t.Run("empty batch handling", func(t *testing.T) {
verifyKzgBatch([]*kzgVerifier{})
})
}
func TestKzgBatchVerifierConcurrency(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
const numGoroutines = 10
const numRequestsPerGoroutine = 5
var wg sync.WaitGroup
wg.Add(numGoroutines)
// Multiple goroutines sending verification requests simultaneously
for i := range numGoroutines {
go func(goroutineID int) {
defer wg.Done()
for range numRequestsPerGoroutine {
dataColumns := createValidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
}
}(i)
}
wg.Wait()
}
func TestKzgBatchVerifierFallback(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
require.Equal(t, pubsub.ValidationReject, result)
assert.NotNil(t, err)
})
t.Run("empty data columns fallback", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
})
}
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.SecondsPerSlot = 0
params.OverrideBeaconConfig(cfg)
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier),
}
go service.kzgVerifierRoutine()
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.DeadlineExceeded)
done := make(chan struct{})
go func() {
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
close(done)
}()
select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier blocked")
}
}
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
cancelledCtx, cancel := context.WithCancel(t.Context())
cancel()
service := &Service{
ctx: context.Background(),
kzgChan: make(chan *kzgVerifier),
}
done := make(chan struct{})
go func() {
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.Canceled)
close(done)
}()
select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
}
select {
case <-service.kzgChan:
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
default:
}
}
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
if len(roSidecars) >= count {

View File

@@ -165,7 +165,6 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
@@ -206,10 +205,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
// Initialize KZG channel with fixed buffer size of 100.
// This buffer size is designed to handle burst traffic of data column gossip messages:
// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
r.kzgChan = make(chan *kzgVerifier, 100)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
@@ -262,7 +258,6 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
go s.verifierRoutine()
go s.kzgVerifierRoutine()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()

View File

@@ -144,12 +144,9 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}
// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
if validationResult != pubsub.ValidationAccept {
return validationResult, err
if err := verifier.SidecarKzgProofVerified(); err != nil {
return pubsub.ValidationReject, err
}
// Mark KZG verification as satisfied since we did it via batch verifier
verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
// with valid header signature, sidecar inclusion proof, and kzg proof.

View File

@@ -71,10 +71,7 @@ func TestValidateDataColumn(t *testing.T) {
ctx: ctx,
newColumnsVerifier: newDataColumnsVerifier,
seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
kzgChan: make(chan *kzgVerifier, 100),
}
// Start the KZG verifier routine for batch verification
go service.kzgVerifierRoutine()
// Encode a `beaconBlock` message instead of expected.
buf := new(bytes.Buffer)

View File

@@ -1,5 +0,0 @@
### Added
- Added an ephemeral debug logfile for beacon and validator nodes that captures debug-level logs for 24 hours. It
also keeps 1 backup in case of size-based rotation. The logfiles are stored in `datadir/logs/`. This feature is
enabled by default and can be disabled by setting the `--disable-ephemeral-log-file` flag.

View File

@@ -1,4 +0,0 @@
### Changed
- Moved verbosity settings to be configurable per hook, rather than just globally. This allows us to control the
verbosity of individual output independently.

View File

@@ -1,3 +0,0 @@
### Changed
- Post-Electra, we now request attestation data once per slot and use a cache for subsequent requests

View File

@@ -0,0 +1,3 @@
### Removed
- Batching of KZG verification for data column sidecars received via gossip

View File

@@ -1,2 +0,0 @@
### Added
- Add a feature flag to pass spectests with low validator count.

View File

@@ -1,3 +0,0 @@
### Added
- Added basic Gloas builder support (`Builder` message and `BeaconStateGloas` `builders`/`next_withdrawal_builder_index` fields)

View File

@@ -356,12 +356,6 @@ var (
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
}
// DisableEphemeralLogFile disables the 24 hour debug log file.
DisableEphemeralLogFile = &cli.BoolFlag{
Name: "disable-ephemeral-log-file",
Usage: "Disables the creation of a debug log file that keeps 24 hours of logs.",
Value: false,
}
// DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
DisableGetBlobsV2 = &cli.BoolFlag{
Name: "disable-get-blobs-v2",

View File

@@ -158,7 +158,6 @@ var appFlags = []cli.Flag{
dasFlags.BackfillOldestSlot,
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
flags.DisableEphemeralLogFile,
}
func init() {
@@ -171,15 +170,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to load flags from config file")
}
// determine log verbosity
verbosity := ctx.String(cmd.VerbosityFlag.Name)
verbosityLevel, err := logrus.ParseLevel(verbosity)
if err != nil {
return errors.Wrap(err, "failed to parse log verbosity")
}
logs.SetLoggingLevel(verbosityLevel)
format := ctx.String(cmd.LogFormat.Name)
switch format {
case "text":
// disabling logrus default output so we can control it via different hooks
@@ -193,9 +185,8 @@ func before(ctx *cli.Context) error {
formatter.ForceColors = true
logrus.AddHook(&logs.WriterHook{
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
Formatter: formatter,
Writer: os.Stderr,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -219,17 +210,11 @@ func before(ctx *cli.Context) error {
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
if !ctx.Bool(flags.DisableEphemeralLogFile.Name) {
if err := logs.ConfigureEphemeralLogFile(ctx.String(cmd.DataDirFlag.Name), ctx.App.Name); err != nil {
log.WithError(err).Error("Failed to configure debug log file")
}
}
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
return errors.Wrap(err, "failed to expand single endpoint")
}
@@ -306,7 +291,7 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
if err != nil {
return err
}
logrus.SetLevel(level)
// Set libp2p logger to only panic logs for the info level.
golog.SetAllLoggers(golog.LevelPanic)

View File

@@ -200,7 +200,6 @@ var appHelpFlagGroups = []flagGroup{
cmd.LogFormat,
cmd.LogFileName,
cmd.VerbosityFlag,
flags.DisableEphemeralLogFile,
},
},
{ // Feature flags.

View File

@@ -86,7 +86,7 @@ func main() {
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}

View File

@@ -410,12 +410,6 @@ var (
Usage: "Maximum number of health checks to perform before exiting if not healthy. Set to 0 or a negative number for indefinite checks.",
Value: DefaultMaxHealthChecks,
}
// DisableEphemeralLogFile disables the 24 hour debug log file.
DisableEphemeralLogFile = &cli.BoolFlag{
Name: "disable-ephemeral-log-file",
Usage: "Disables the creation of a debug log file that keeps 24 hours of logs.",
Value: false,
}
)
// DefaultValidatorDir returns OS-specific default validator directory.

View File

@@ -115,7 +115,6 @@ var appFlags = []cli.Flag{
debug.BlockProfileRateFlag,
debug.MutexProfileFractionFlag,
cmd.AcceptTosFlag,
flags.DisableEphemeralLogFile,
}
func init() {
@@ -148,14 +147,6 @@ func main() {
return err
}
// determine log verbosity
verbosity := ctx.String(cmd.VerbosityFlag.Name)
verbosityLevel, err := logrus.ParseLevel(verbosity)
if err != nil {
return errors.Wrap(err, "failed to parse log verbosity")
}
logs.SetLoggingLevel(verbosityLevel)
logFileName := ctx.String(cmd.LogFileName.Name)
format := ctx.String(cmd.LogFormat.Name)
@@ -172,9 +163,8 @@ func main() {
formatter.ForceColors = true
logrus.AddHook(&logs.WriterHook{
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
Formatter: formatter,
Writer: os.Stderr,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -195,17 +185,11 @@ func main() {
}
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
if !ctx.Bool(flags.DisableEphemeralLogFile.Name) {
if err := logs.ConfigureEphemeralLogFile(ctx.String(cmd.DataDirFlag.Name), ctx.App.Name); err != nil {
log.WithError(err).Error("Failed to configure debug log file")
}
}
// Fix data dir for Windows users.
outdatedDataDir := filepath.Join(file.HomeDir(), "AppData", "Roaming", "Eth2Validators")
currentDataDir := flags.DefaultValidatorDir()

View File

@@ -75,7 +75,6 @@ var appHelpFlagGroups = []flagGroup{
cmd.GrpcMaxCallRecvMsgSizeFlag,
cmd.AcceptTosFlag,
cmd.ApiTimeoutFlag,
flags.DisableEphemeralLogFile,
},
},
{

View File

@@ -80,7 +80,6 @@ type Flags struct {
SaveInvalidBlob bool // SaveInvalidBlob saves invalid blob to temp.
EnableDiscoveryReboot bool // EnableDiscoveryReboot allows the node to have its local listener to be rebooted in the event of discovery issues.
LowValcountSweep bool // LowValcountSweep bounds withdrawal sweep by validator count.
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
// changed on disk. This feature is for advanced use cases only.
@@ -285,10 +284,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(ignoreUnviableAttestations)
cfg.IgnoreUnviableAttestations = true
}
if ctx.Bool(lowValcountSweep.Name) {
logEnabled(lowValcountSweep)
cfg.LowValcountSweep = true
}
if ctx.IsSet(EnableStateDiff.Name) {
logEnabled(EnableStateDiff)

View File

@@ -211,13 +211,6 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// lowValcountSweep bounds withdrawal sweep by validator count.
lowValcountSweep = &cli.BoolFlag{
Name: "low-valcount-sweep",
Usage: "Uses validator count bound for withdrawal sweep when validator count is less than MaxValidatorsPerWithdrawalsSweep.",
Value: false,
Hidden: true,
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -279,7 +272,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
lowValcountSweep,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -9,7 +9,6 @@ const (
RandaoMixesLength = 65536 // EPOCHS_PER_HISTORICAL_VECTOR
HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
BuilderRegistryLimit = 1099511627776 // BUILDER_REGISTRY_LIMIT
Eth1DataVotesLength = 2048 // SLOTS_PER_ETH1_VOTING_PERIOD
PreviousEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
CurrentEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH

View File

@@ -9,7 +9,6 @@ const (
RandaoMixesLength = 64 // EPOCHS_PER_HISTORICAL_VECTOR
HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
BuilderRegistryLimit = 1099511627776 // BUILDER_REGISTRY_LIMIT
Eth1DataVotesLength = 32 // SLOTS_PER_ETH1_VOTING_PERIOD
PreviousEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
CurrentEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH

View File

@@ -206,7 +206,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
BeaconStateDenebFieldCount: 28,
BeaconStateElectraFieldCount: 37,
BeaconStateFuluFieldCount: 38,
BeaconStateGloasFieldCount: 45,
BeaconStateGloasFieldCount: 43,
// Slasher related values.
WeakSubjectivityPeriod: 54000,
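The new Gloas field count follows from this commit's state changes: three fields removed (Builders, NextWithdrawalBuilderIndex, PayloadExpectedWithdrawals) and one added (LatestWithdrawalsRoot), assuming no other field-count changes in this diff. A sketch of the arithmetic (illustrative constant, not the config code):

```go
package config

// BeaconStateGloasFieldCount after this change:
// previous count (45) - 3 removed fields + 1 added field = 43.
const beaconStateGloasFieldCount = 45 - 3 + 1 // = 43
```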

View File

@@ -669,7 +669,6 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
ParentBlockHash: make([]byte, fieldparams.RootLength),
ParentBlockRoot: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
},

go.mod
View File

@@ -96,7 +96,6 @@ require (
google.golang.org/grpc v1.71.0
google.golang.org/protobuf v1.36.5
gopkg.in/d4l3k/messagediff.v1 v1.2.1
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
honnef.co/go/tools v0.6.0
@@ -274,6 +273,7 @@ require (
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
lukechampine.com/blake3 v1.3.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect

View File

@@ -17,9 +17,7 @@ go_library(
"//io/file:go_default_library",
"//runtime/logging/logrus-prefixed-formatter:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@in_gopkg_natefinch_lumberjack_v2//:go_default_library",
],
)
@@ -30,8 +28,5 @@ go_test(
"stream_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -12,25 +12,16 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/io/file"
prefixed "github.com/OffchainLabs/prysm/v7/runtime/logging/logrus-prefixed-formatter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gopkg.in/natefinch/lumberjack.v2"
)
var ephemeralLogFileVerbosity = logrus.DebugLevel
// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
}
func addLogWriter(w io.Writer) {
mw := io.MultiWriter(logrus.StandardLogger().Out, w)
logrus.SetOutput(mw)
}
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
func ConfigurePersistentLogging(logFileName string, format string) error {
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
return err
@@ -56,48 +47,14 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
formatter.DisableColors = true
logrus.AddHook(&WriterHook{
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:lvl+1],
Formatter: formatter,
Writer: f,
})
logrus.Info("File logging initialized")
return nil
}
// ConfigureEphemeralLogFile adds a log file that keeps 24 hours of logs with >debug verbosity.
func ConfigureEphemeralLogFile(datadirPath string, app string) error {
logFilePath := filepath.Join(datadirPath, "logs", app+".log")
if err := file.MkdirAll(filepath.Dir(logFilePath)); err != nil {
return errors.Wrap(err, "failed to create directory")
}
// Create formatter and writer hook
formatter := new(prefixed.TextFormatter)
formatter.TimestampFormat = "2006-01-02 15:04:05.00"
formatter.FullTimestamp = true
// If persistent log files are written - we disable the log messages coloring because
// the colors are ANSI codes and seen as gibberish in the log files.
formatter.DisableColors = true
// configure the lumberjack log writer to rotate logs every ~24 hours
debugWriter := &lumberjack.Logger{
Filename: logFilePath,
MaxSize: 250, // MB, to avoid unbounded growth
MaxBackups: 1, // one backup in case of size-based rotations
MaxAge: 1, // days; files older than this are removed
}
logrus.AddHook(&WriterHook{
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
})
logrus.Info("Ephemeral log file initialized")
return nil
}
// MaskCredentialsLogging masks the url credentials before logging for security purpose
// [scheme:][//[userinfo@]host][/]path[?query][#fragment] --> [scheme:][//[***]host][/***][#***]
// if the format is not matched nothing is done, string is returned as is.
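A minimal usage sketch of the logging API after this change: ConfigurePersistentLogging no longer takes a level argument, and callers set verbosity directly on logrus. The file path, level, and the io/logs import path are assumptions for illustration.

```go
package main

import (
	"github.com/OffchainLabs/prysm/v7/io/logs"
	"github.com/sirupsen/logrus"
)

func main() {
	// Verbosity is applied globally via logrus; the file hook no longer filters by level.
	logrus.SetLevel(logrus.InfoLevel)
	// Two-argument signature after this change (path is illustrative).
	if err := logs.ConfigurePersistentLogging("/tmp/beacon.log", "text"); err != nil {
		logrus.WithError(err).Error("Failed to configure logging to disk")
	}
}
```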

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/sirupsen/logrus"
)
var urltests = []struct {
@@ -35,13 +34,13 @@ func TestConfigurePersistantLogging(t *testing.T) {
logFileName := "test.log"
existingDirectory := "test-1-existing-testing-dir"
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text")
require.NoError(t, err)
// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text")
require.NoError(t, err)
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
@@ -52,7 +51,7 @@ func TestConfigurePersistantLogging(t *testing.T) {
return
}
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text")
require.NoError(t, err)
//4. Create log file in a directory without 700 permissions
@@ -62,38 +61,3 @@ func TestConfigurePersistantLogging(t *testing.T) {
return
}
}
func TestConfigureEphemeralLogFile(t *testing.T) {
testParentDir := t.TempDir()
// 1. Test creation of file in an existing parent directory
existingDirectory := "test-1-existing-testing-dir"
err := ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), "beacon-chain")
require.NoError(t, err)
// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s", testParentDir, nonExistingDirectory), "beacon-chain")
require.NoError(t, err)
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
existingDirectory = "test-3-existing-testing-dir"
nonExistingSubDirectory := "test-3-non-existing-sub-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0700)
if err != nil {
return
}
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory), "beacon-chain")
require.NoError(t, err)
//4. Create log file in a directory without 700 permissions
existingDirectory = "test-4-existing-testing-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0750)
if err != nil {
return
}
err = ConfigureEphemeralLogFile(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory), "beacon-chain")
require.NoError(t, err)
}

View File

@@ -35,21 +35,6 @@ func CopyValidator(val *Validator) *Validator {
}
}
// CopyBuilder copies the provided builder.
func CopyBuilder(builder *Builder) *Builder {
if builder == nil {
return nil
}
return &Builder{
Pubkey: bytesutil.SafeCopyBytes(builder.Pubkey),
Version: bytesutil.SafeCopyBytes(builder.Version),
ExecutionAddress: bytesutil.SafeCopyBytes(builder.ExecutionAddress),
Balance: builder.Balance,
DepositEpoch: builder.DepositEpoch,
WithdrawableEpoch: builder.WithdrawableEpoch,
}
}
// CopySyncCommitteeMessage copies the provided sync committee message object.
func CopySyncCommitteeMessage(s *SyncCommitteeMessage) *SyncCommitteeMessage {
if s == nil {

View File

@@ -1220,7 +1220,7 @@ func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
BuilderIndex: primitives.ValidatorIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
BlobKzgCommitmentsRoot: bytes(32),

View File

@@ -13,13 +13,11 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
}
}
@@ -30,9 +28,10 @@ func (withdrawal *BuilderPendingWithdrawal) Copy() *BuilderPendingWithdrawal {
return nil
}
return &BuilderPendingWithdrawal{
FeeRecipient: bytesutil.SafeCopyBytes(withdrawal.FeeRecipient),
Amount: withdrawal.Amount,
BuilderIndex: withdrawal.BuilderIndex,
FeeRecipient: bytesutil.SafeCopyBytes(withdrawal.FeeRecipient),
Amount: withdrawal.Amount,
BuilderIndex: withdrawal.BuilderIndex,
WithdrawableEpoch: withdrawal.WithdrawableEpoch,
}
}

File diff suppressed because it is too large.

View File

@@ -26,37 +26,30 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
// parent_block_hash: Hash32
// parent_block_root: Root
// block_hash: Hash32
// prev_randao: Bytes32
// fee_recipient: ExecutionAddress
// gas_limit: uint64
// builder_index: BuilderIndex
// builder_index: ValidatorIndex
// slot: Slot
// value: Gwei
// execution_payment: Gwei
// blob_kzg_commitments_root: Root
message ExecutionPayloadBid {
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes prev_randao = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes fee_recipient = 5 [ (ethereum.eth.ext.ssz_size) = "20" ];
uint64 gas_limit = 6;
uint64 builder_index = 7 [ (ethereum.eth.ext.cast_type) =
bytes fee_recipient = 4 [ (ethereum.eth.ext.ssz_size) = "20" ];
uint64 gas_limit = 5;
uint64 builder_index = 6 [ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/"
"consensus-types/primitives.BuilderIndex" ];
uint64 slot = 8 [
"consensus-types/primitives.ValidatorIndex" ];
uint64 slot = 7 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
uint64 value = 9 [
uint64 value = 8 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
uint64 execution_payment = 10 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes blob_kzg_commitments_root = 9 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// SignedExecutionPayloadBid wraps an execution payload bid with a signature.
@@ -192,15 +185,12 @@ message SignedBeaconBlockGloas {
// [All previous fields from earlier forks]
// # Replaced existing latest execution header position
// latest_execution_payload_bid: ExecutionPayloadBid
// builders: List[Builder, BUILDER_REGISTRY_LIMIT]
// # [New in Gloas:EIP7732]
// next_withdrawal_builder_index: BuilderIndex
// # [New in Gloas:EIP7732]
// # New fields in Gloas:EIP7732
// execution_payload_availability: Bitvector[SLOTS_PER_HISTORICAL_ROOT]
// builder_pending_payments: Vector[BuilderPendingPayment, 2 * SLOTS_PER_EPOCH]
// builder_pending_withdrawals: List[BuilderPendingWithdrawal, BUILDER_PENDING_WITHDRAWALS_LIMIT]
// latest_block_hash: Hash32
// payload_expected_withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
// latest_withdrawals_root: Root
message BeaconStateGloas {
// Versioning [1001-2000]
uint64 genesis_time = 1001;
@@ -310,20 +300,13 @@ message BeaconStateGloas {
[ (ethereum.eth.ext.ssz_size) = "proposer_lookahead_size" ];
// Fields introduced in Gloas fork [14001-15000]
repeated Builder builders = 14001
[ (ethereum.eth.ext.ssz_max) = "builder_registry_limit" ];
uint64 next_withdrawal_builder_index = 14002
[ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/"
"primitives.BuilderIndex" ];
bytes execution_payload_availability = 14003 [
bytes execution_payload_availability = 14001 [
(ethereum.eth.ext.ssz_size) = "execution_payload_availability.size"
];
repeated BuilderPendingPayment builder_pending_payments = 14004 [(ethereum.eth.ext.ssz_size) = "builder_pending_payments.size"];
repeated BuilderPendingWithdrawal builder_pending_withdrawals = 14005 [(ethereum.eth.ext.ssz_max) = "1048576"];
bytes latest_block_hash = 14006 [ (ethereum.eth.ext.ssz_size) = "32" ];
repeated ethereum.engine.v1.Withdrawal payload_expected_withdrawals = 14007
[ (ethereum.eth.ext.ssz_max) = "withdrawal.size" ];
repeated BuilderPendingPayment builder_pending_payments = 14002 [(ethereum.eth.ext.ssz_size) = "builder_pending_payments.size"];
repeated BuilderPendingWithdrawal builder_pending_withdrawals = 14003 [(ethereum.eth.ext.ssz_max) = "1048576"];
bytes latest_block_hash = 14004 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes latest_withdrawals_root = 14005 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// BuilderPendingPayment represents a pending payment to a builder.
@@ -346,7 +329,8 @@ message BuilderPendingPayment {
// class BuilderPendingWithdrawal(Container):
// fee_recipient: ExecutionAddress
// amount: Gwei
// builder_index: BuilderIndex
// builder_index: ValidatorIndex
// withdrawable_epoch: Epoch
message BuilderPendingWithdrawal {
bytes fee_recipient = 1 [ (ethereum.eth.ext.ssz_size) = "20" ];
uint64 amount = 2 [
@@ -356,7 +340,11 @@ message BuilderPendingWithdrawal {
uint64 builder_index = 3
[ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/"
"primitives.BuilderIndex" ];
"primitives.ValidatorIndex" ];
uint64 withdrawable_epoch = 4 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
];
}
// DataColumnSidecarGloas represents a data column sidecar in the Gloas fork.
@@ -399,7 +387,7 @@ message DataColumnSidecarGloas {
// class ExecutionPayloadEnvelope(Container):
// payload: ExecutionPayload
// execution_requests: ExecutionRequests
// builder_index: BuilderIndex
// builder_index: ValidatorIndex
// beacon_block_root: Root
// slot: Slot
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
@@ -409,7 +397,7 @@ message ExecutionPayloadEnvelope {
ethereum.engine.v1.ExecutionRequests execution_requests = 2;
uint64 builder_index = 3 [ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/"
"consensus-types/primitives.BuilderIndex" ];
"consensus-types/primitives.ValidatorIndex" ];
bytes beacon_block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
uint64 slot = 5 [
(ethereum.eth.ext.cast_type) =
@@ -433,31 +421,3 @@ message SignedExecutionPayloadEnvelope {
ExecutionPayloadEnvelope message = 1;
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
}
// Builder represents a builder in the Gloas fork.
//
// Spec:
// class Builder(Container):
// pubkey: BLSPubkey
// version: uint8
// execution_address: ExecutionAddress
// balance: Gwei
// deposit_epoch: Epoch
// withdrawable_epoch: Epoch
message Builder {
bytes pubkey = 1 [ (ethereum.eth.ext.ssz_size) = "48" ];
bytes version = 2 [ (ethereum.eth.ext.ssz_size) = "1" ];
bytes execution_address = 3 [ (ethereum.eth.ext.ssz_size) = "20" ];
uint64 balance = 4 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
uint64 deposit_epoch = 5 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
];
uint64 withdrawable_epoch = 6 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
];
}

View File

@@ -37,36 +37,26 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
dst = append(dst, e.BlockHash...)
// Field (3) 'PrevRandao'
if size := len(e.PrevRandao); size != 32 {
err = ssz.ErrBytesLengthFn("--.PrevRandao", size, 32)
return
}
dst = append(dst, e.PrevRandao...)
// Field (4) 'FeeRecipient'
// Field (3) 'FeeRecipient'
if size := len(e.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
dst = append(dst, e.FeeRecipient...)
// Field (5) 'GasLimit'
// Field (4) 'GasLimit'
dst = ssz.MarshalUint64(dst, e.GasLimit)
// Field (6) 'BuilderIndex'
// Field (5) 'BuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(e.BuilderIndex))
// Field (7) 'Slot'
// Field (6) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
// Field (8) 'Value'
// Field (7) 'Value'
dst = ssz.MarshalUint64(dst, uint64(e.Value))
// Field (9) 'ExecutionPayment'
dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))
// Field (10) 'BlobKzgCommitmentsRoot'
// Field (8) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
@@ -80,7 +70,7 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 220 {
if size != 180 {
return ssz.ErrSize
}
@@ -102,45 +92,36 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
}
e.BlockHash = append(e.BlockHash, buf[64:96]...)
// Field (3) 'PrevRandao'
if cap(e.PrevRandao) == 0 {
e.PrevRandao = make([]byte, 0, len(buf[96:128]))
}
e.PrevRandao = append(e.PrevRandao, buf[96:128]...)
// Field (4) 'FeeRecipient'
// Field (3) 'FeeRecipient'
if cap(e.FeeRecipient) == 0 {
e.FeeRecipient = make([]byte, 0, len(buf[128:148]))
e.FeeRecipient = make([]byte, 0, len(buf[96:116]))
}
e.FeeRecipient = append(e.FeeRecipient, buf[128:148]...)
e.FeeRecipient = append(e.FeeRecipient, buf[96:116]...)
// Field (5) 'GasLimit'
e.GasLimit = ssz.UnmarshallUint64(buf[148:156])
// Field (4) 'GasLimit'
e.GasLimit = ssz.UnmarshallUint64(buf[116:124])
// Field (6) 'BuilderIndex'
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[156:164]))
// Field (5) 'BuilderIndex'
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[124:132]))
// Field (7) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[164:172]))
// Field (6) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[132:140]))
// Field (8) 'Value'
e.Value = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[172:180]))
// Field (7) 'Value'
e.Value = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[140:148]))
// Field (9) 'ExecutionPayment'
e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))
// Field (10) 'BlobKzgCommitmentsRoot'
// Field (8) 'BlobKzgCommitmentsRoot'
if cap(e.BlobKzgCommitmentsRoot) == 0 {
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[148:180]))
}
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[148:180]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
size = 220
size = 180
return
}
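To make the new constant easy to verify, here is a minimal, illustrative breakdown of the 180-byte fixed encoding (the field names mirror the generated struct above; the constant itself is an assumption for illustration, not part of the generated file):

const executionPayloadBidSize = 32 + // ParentBlockHash
    32 + // ParentBlockRoot
    32 + // BlockHash
    20 + // FeeRecipient
    8 + // GasLimit
    8 + // BuilderIndex
    8 + // Slot
    8 + // Value
    32 // BlobKzgCommitmentsRoot => 180 bytes (previously 180 + 32 PrevRandao + 8 ExecutionPayment = 220)

The same arithmetic accounts for the SignedExecutionPayloadBid size below: 180 + 96 (signature) = 276.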
@@ -174,36 +155,26 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.BlockHash)
// Field (3) 'PrevRandao'
if size := len(e.PrevRandao); size != 32 {
err = ssz.ErrBytesLengthFn("--.PrevRandao", size, 32)
return
}
hh.PutBytes(e.PrevRandao)
// Field (4) 'FeeRecipient'
// Field (3) 'FeeRecipient'
if size := len(e.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
hh.PutBytes(e.FeeRecipient)
// Field (5) 'GasLimit'
// Field (4) 'GasLimit'
hh.PutUint64(e.GasLimit)
// Field (6) 'BuilderIndex'
// Field (5) 'BuilderIndex'
hh.PutUint64(uint64(e.BuilderIndex))
// Field (7) 'Slot'
// Field (6) 'Slot'
hh.PutUint64(uint64(e.Slot))
// Field (8) 'Value'
// Field (7) 'Value'
hh.PutUint64(uint64(e.Value))
// Field (9) 'ExecutionPayment'
hh.PutUint64(uint64(e.ExecutionPayment))
// Field (10) 'BlobKzgCommitmentsRoot'
// Field (8) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
@@ -245,7 +216,7 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 316 {
if size != 276 {
return ssz.ErrSize
}
@@ -253,22 +224,22 @@ func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
if err = s.Message.UnmarshalSSZ(buf[0:180]); err != nil {
return err
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[220:316]))
s.Signature = make([]byte, 0, len(buf[180:276]))
}
s.Signature = append(s.Signature, buf[220:316]...)
s.Signature = append(s.Signature, buf[180:276]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 316
size = 276
return
}
@@ -742,7 +713,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(704)
offset := int(664)
// Field (0) 'RandaoReveal'
if size := len(b.RandaoReveal); size != 96 {
@@ -914,7 +885,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 704 {
if size < 664 {
return ssz.ErrSize
}
@@ -946,7 +917,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o3 != 704 {
if o3 != 664 {
return ssz.ErrInvalidVariableOffset
}
@@ -987,12 +958,12 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:660]); err != nil {
return err
}
// Offset (11) 'PayloadAttestations'
if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
if o11 = ssz.ReadOffset(buf[660:664]); o11 > size || o9 > o11 {
return ssz.ErrOffset
}
@@ -1134,7 +1105,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
size = 704
size = 664
// Field (3) 'ProposerSlashings'
size += len(b.ProposerSlashings) * 416
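A quick, illustrative cross-check of the 664-byte figure (the constant name here is hypothetical): the block body's fixed part shrinks by exactly the 40 bytes saved in the embedded signed bid.

const beaconBlockBodyGloasFixedSize = 704 - (316 - 276) // SignedExecutionPayloadBid shrank from 316 to 276 bytes, so 704 -> 664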
@@ -1437,7 +1408,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(2741333)
offset := int(2741821)
// Field (0) 'GenesisTime'
dst = ssz.MarshalUint64(dst, b.GenesisTime)
@@ -1659,21 +1630,14 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = ssz.MarshalUint64(dst, b.ProposerLookahead[ii])
}
// Offset (38) 'Builders'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.Builders) * 93
// Field (39) 'NextWithdrawalBuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(b.NextWithdrawalBuilderIndex))
// Field (40) 'ExecutionPayloadAvailability'
// Field (38) 'ExecutionPayloadAvailability'
if size := len(b.ExecutionPayloadAvailability); size != 1024 {
err = ssz.ErrBytesLengthFn("--.ExecutionPayloadAvailability", size, 1024)
return
}
dst = append(dst, b.ExecutionPayloadAvailability...)
// Field (41) 'BuilderPendingPayments'
// Field (39) 'BuilderPendingPayments'
if size := len(b.BuilderPendingPayments); size != 64 {
err = ssz.ErrVectorLengthFn("--.BuilderPendingPayments", size, 64)
return
@@ -1684,20 +1648,23 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}
// Offset (42) 'BuilderPendingWithdrawals'
// Offset (40) 'BuilderPendingWithdrawals'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.BuilderPendingWithdrawals) * 36
offset += len(b.BuilderPendingWithdrawals) * 44
// Field (43) 'LatestBlockHash'
// Field (41) 'LatestBlockHash'
if size := len(b.LatestBlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestBlockHash", size, 32)
return
}
dst = append(dst, b.LatestBlockHash...)
// Offset (44) 'PayloadExpectedWithdrawals'
dst = ssz.WriteOffset(dst, offset)
offset += len(b.PayloadExpectedWithdrawals) * 44
// Field (42) 'LatestWithdrawalsRoot'
if size := len(b.LatestWithdrawalsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestWithdrawalsRoot", size, 32)
return
}
dst = append(dst, b.LatestWithdrawalsRoot...)
// Field (7) 'HistoricalRoots'
if size := len(b.HistoricalRoots); size > 16777216 {
@@ -1810,18 +1777,7 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}
// Field (38) 'Builders'
if size := len(b.Builders); size > 1099511627776 {
err = ssz.ErrListTooBigFn("--.Builders", size, 1099511627776)
return
}
for ii := 0; ii < len(b.Builders); ii++ {
if dst, err = b.Builders[ii].MarshalSSZTo(dst); err != nil {
return
}
}
// Field (42) 'BuilderPendingWithdrawals'
// Field (40) 'BuilderPendingWithdrawals'
if size := len(b.BuilderPendingWithdrawals); size > 1048576 {
err = ssz.ErrListTooBigFn("--.BuilderPendingWithdrawals", size, 1048576)
return
@@ -1832,17 +1788,6 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}
}
// Field (44) 'PayloadExpectedWithdrawals'
if size := len(b.PayloadExpectedWithdrawals); size > 16 {
err = ssz.ErrListTooBigFn("--.PayloadExpectedWithdrawals", size, 16)
return
}
for ii := 0; ii < len(b.PayloadExpectedWithdrawals); ii++ {
if dst, err = b.PayloadExpectedWithdrawals[ii].MarshalSSZTo(dst); err != nil {
return
}
}
return
}
@@ -1850,12 +1795,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 2741333 {
if size < 2741821 {
return ssz.ErrSize
}
tail := buf
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o40 uint64
// Field (0) 'GenesisTime'
b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])
@@ -1908,7 +1853,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o7 != 2741333 {
if o7 != 2741821 {
return ssz.ErrInvalidVariableOffset
}
@@ -2018,100 +1963,93 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736809]); err != nil {
return err
}
// Field (25) 'NextWithdrawalIndex'
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736809:2736817])
// Field (26) 'NextWithdrawalValidatorIndex'
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736817:2736825]))
// Offset (27) 'HistoricalSummaries'
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
if o27 = ssz.ReadOffset(buf[2736825:2736829]); o27 > size || o21 > o27 {
return ssz.ErrOffset
}
// Field (28) 'DepositRequestsStartIndex'
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736829:2736837])
// Field (29) 'DepositBalanceToConsume'
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736837:2736845]))
// Field (30) 'ExitBalanceToConsume'
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736845:2736853]))
// Field (31) 'EarliestExitEpoch'
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736853:2736861]))
// Field (32) 'ConsolidationBalanceToConsume'
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736861:2736869]))
// Field (33) 'EarliestConsolidationEpoch'
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736869:2736877]))
// Offset (34) 'PendingDeposits'
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
if o34 = ssz.ReadOffset(buf[2736877:2736881]); o34 > size || o27 > o34 {
return ssz.ErrOffset
}
// Offset (35) 'PendingPartialWithdrawals'
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
if o35 = ssz.ReadOffset(buf[2736881:2736885]); o35 > size || o34 > o35 {
return ssz.ErrOffset
}
// Offset (36) 'PendingConsolidations'
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
if o36 = ssz.ReadOffset(buf[2736885:2736889]); o36 > size || o35 > o36 {
return ssz.ErrOffset
}
// Field (37) 'ProposerLookahead'
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
for ii := 0; ii < 64; ii++ {
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736889:2737401][ii*8 : (ii+1)*8])
}
// Offset (38) 'Builders'
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
return ssz.ErrOffset
}
// Field (39) 'NextWithdrawalBuilderIndex'
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))
// Field (40) 'ExecutionPayloadAvailability'
// Field (38) 'ExecutionPayloadAvailability'
if cap(b.ExecutionPayloadAvailability) == 0 {
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737401:2738425]))
}
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737401:2738425]...)
// Field (41) 'BuilderPendingPayments'
// Field (39) 'BuilderPendingPayments'
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)
for ii := 0; ii < 64; ii++ {
if b.BuilderPendingPayments[ii] == nil {
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
}
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738425:2741753][ii*52 : (ii+1)*52]); err != nil {
return err
}
}
// Offset (42) 'BuilderPendingWithdrawals'
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
// Offset (40) 'BuilderPendingWithdrawals'
if o40 = ssz.ReadOffset(buf[2741753:2741757]); o40 > size || o36 > o40 {
return ssz.ErrOffset
}
// Field (43) 'LatestBlockHash'
// Field (41) 'LatestBlockHash'
if cap(b.LatestBlockHash) == 0 {
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
b.LatestBlockHash = make([]byte, 0, len(buf[2741757:2741789]))
}
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741757:2741789]...)
// Offset (44) 'PayloadExpectedWithdrawals'
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
return ssz.ErrOffset
// Field (42) 'LatestWithdrawalsRoot'
if cap(b.LatestWithdrawalsRoot) == 0 {
b.LatestWithdrawalsRoot = make([]byte, 0, len(buf[2741789:2741821]))
}
b.LatestWithdrawalsRoot = append(b.LatestWithdrawalsRoot, buf[2741789:2741821]...)
// Field (7) 'HistoricalRoots'
{
@@ -2271,7 +2209,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// Field (36) 'PendingConsolidations'
{
buf = tail[o36:o38]
buf = tail[o36:o40]
num, err := ssz.DivideInt2(len(buf), 16, 262144)
if err != nil {
return err
@@ -2287,28 +2225,10 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (38) 'Builders'
// Field (40) 'BuilderPendingWithdrawals'
{
buf = tail[o38:o42]
num, err := ssz.DivideInt2(len(buf), 93, 1099511627776)
if err != nil {
return err
}
b.Builders = make([]*Builder, num)
for ii := 0; ii < num; ii++ {
if b.Builders[ii] == nil {
b.Builders[ii] = new(Builder)
}
if err = b.Builders[ii].UnmarshalSSZ(buf[ii*93 : (ii+1)*93]); err != nil {
return err
}
}
}
// Field (42) 'BuilderPendingWithdrawals'
{
buf = tail[o42:o44]
num, err := ssz.DivideInt2(len(buf), 36, 1048576)
buf = tail[o40:]
num, err := ssz.DivideInt2(len(buf), 44, 1048576)
if err != nil {
return err
}
@@ -2317,25 +2237,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.BuilderPendingWithdrawals[ii] == nil {
b.BuilderPendingWithdrawals[ii] = new(BuilderPendingWithdrawal)
}
if err = b.BuilderPendingWithdrawals[ii].UnmarshalSSZ(buf[ii*36 : (ii+1)*36]); err != nil {
return err
}
}
}
// Field (44) 'PayloadExpectedWithdrawals'
{
buf = tail[o44:]
num, err := ssz.DivideInt2(len(buf), 44, 16)
if err != nil {
return err
}
b.PayloadExpectedWithdrawals = make([]*v1.Withdrawal, num)
for ii := 0; ii < num; ii++ {
if b.PayloadExpectedWithdrawals[ii] == nil {
b.PayloadExpectedWithdrawals[ii] = new(v1.Withdrawal)
}
if err = b.PayloadExpectedWithdrawals[ii].UnmarshalSSZ(buf[ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingWithdrawals[ii].UnmarshalSSZ(buf[ii*44 : (ii+1)*44]); err != nil {
return err
}
}
@@ -2345,7 +2247,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
func (b *BeaconStateGloas) SizeSSZ() (size int) {
size = 2741333
size = 2741821
// Field (7) 'HistoricalRoots'
size += len(b.HistoricalRoots) * 32
@@ -2380,14 +2282,8 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
// Field (36) 'PendingConsolidations'
size += len(b.PendingConsolidations) * 16
// Field (38) 'Builders'
size += len(b.Builders) * 93
// Field (42) 'BuilderPendingWithdrawals'
size += len(b.BuilderPendingWithdrawals) * 36
// Field (44) 'PayloadExpectedWithdrawals'
size += len(b.PayloadExpectedWithdrawals) * 44
// Field (40) 'BuilderPendingWithdrawals'
size += len(b.BuilderPendingWithdrawals) * 44
return
}
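The 488-byte growth of the state's fixed part follows from the field changes shown above. A hedged, illustrative breakdown (these constants are not part of the generated code):

const (
    oldStateFixedSize = 2741333
    stateFixedDelta   = -40 + // LatestExecutionPayloadBid: 220 -> 180
        -4 + // Builders list offset removed
        -8 + // NextWithdrawalBuilderIndex removed
        -4 + // PayloadExpectedWithdrawals offset removed
        32 + // LatestWithdrawalsRoot added
        512 // BuilderPendingPayments vector: 64 * (52 - 44)
    newStateFixedSize = oldStateFixedSize + stateFixedDelta // 2741333 + 488 = 2741821
)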
@@ -2741,33 +2637,14 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(subIndx)
}
// Field (38) 'Builders'
{
subIndx := hh.Index()
num := uint64(len(b.Builders))
if num > 1099511627776 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range b.Builders {
if err = elem.HashTreeRootWith(hh); err != nil {
return
}
}
hh.MerkleizeWithMixin(subIndx, num, 1099511627776)
}
// Field (39) 'NextWithdrawalBuilderIndex'
hh.PutUint64(uint64(b.NextWithdrawalBuilderIndex))
// Field (40) 'ExecutionPayloadAvailability'
// Field (38) 'ExecutionPayloadAvailability'
if size := len(b.ExecutionPayloadAvailability); size != 1024 {
err = ssz.ErrBytesLengthFn("--.ExecutionPayloadAvailability", size, 1024)
return
}
hh.PutBytes(b.ExecutionPayloadAvailability)
// Field (41) 'BuilderPendingPayments'
// Field (39) 'BuilderPendingPayments'
{
subIndx := hh.Index()
for _, elem := range b.BuilderPendingPayments {
@@ -2778,7 +2655,7 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(subIndx)
}
// Field (42) 'BuilderPendingWithdrawals'
// Field (40) 'BuilderPendingWithdrawals'
{
subIndx := hh.Index()
num := uint64(len(b.BuilderPendingWithdrawals))
@@ -2794,28 +2671,19 @@ func (b *BeaconStateGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (43) 'LatestBlockHash'
// Field (41) 'LatestBlockHash'
if size := len(b.LatestBlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestBlockHash", size, 32)
return
}
hh.PutBytes(b.LatestBlockHash)
// Field (44) 'PayloadExpectedWithdrawals'
{
subIndx := hh.Index()
num := uint64(len(b.PayloadExpectedWithdrawals))
if num > 16 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range b.PayloadExpectedWithdrawals {
if err = elem.HashTreeRootWith(hh); err != nil {
return
}
}
hh.MerkleizeWithMixin(subIndx, num, 16)
// Field (42) 'LatestWithdrawalsRoot'
if size := len(b.LatestWithdrawalsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.LatestWithdrawalsRoot", size, 32)
return
}
hh.PutBytes(b.LatestWithdrawalsRoot)
hh.Merkleize(indx)
return
@@ -2848,7 +2716,7 @@ func (b *BuilderPendingPayment) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 44 {
if size != 52 {
return ssz.ErrSize
}
@@ -2859,7 +2727,7 @@ func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {
if b.Withdrawal == nil {
b.Withdrawal = new(BuilderPendingWithdrawal)
}
if err = b.Withdrawal.UnmarshalSSZ(buf[8:44]); err != nil {
if err = b.Withdrawal.UnmarshalSSZ(buf[8:52]); err != nil {
return err
}
@@ -2868,7 +2736,7 @@ func (b *BuilderPendingPayment) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BuilderPendingPayment object
func (b *BuilderPendingPayment) SizeSSZ() (size int) {
size = 44
size = 52
return
}
@@ -2915,6 +2783,9 @@ func (b *BuilderPendingWithdrawal) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (2) 'BuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(b.BuilderIndex))
// Field (3) 'WithdrawableEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.WithdrawableEpoch))
return
}
@@ -2922,7 +2793,7 @@ func (b *BuilderPendingWithdrawal) MarshalSSZTo(buf []byte) (dst []byte, err err
func (b *BuilderPendingWithdrawal) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 36 {
if size != 44 {
return ssz.ErrSize
}
@@ -2936,14 +2807,17 @@ func (b *BuilderPendingWithdrawal) UnmarshalSSZ(buf []byte) error {
b.Amount = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[20:28]))
// Field (2) 'BuilderIndex'
b.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[28:36]))
b.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[28:36]))
// Field (3) 'WithdrawableEpoch'
b.WithdrawableEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[36:44]))
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BuilderPendingWithdrawal object
func (b *BuilderPendingWithdrawal) SizeSSZ() (size int) {
size = 36
size = 44
return
}
@@ -2969,6 +2843,9 @@ func (b *BuilderPendingWithdrawal) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (2) 'BuilderIndex'
hh.PutUint64(uint64(b.BuilderIndex))
// Field (3) 'WithdrawableEpoch'
hh.PutUint64(uint64(b.WithdrawableEpoch))
hh.Merkleize(indx)
return
}
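The two pending-payment container sizes follow directly from the added WithdrawableEpoch field; a small illustrative sketch (constant names are assumptions, not from the generated file):

const (
    builderPendingWithdrawalSize = 20 + 8 + 8 + 8                  // FeeRecipient + Amount + BuilderIndex + WithdrawableEpoch = 44 (was 36)
    builderPendingPaymentSize    = 8 + builderPendingWithdrawalSize // Weight + Withdrawal = 52 (was 44)
)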
@@ -3341,7 +3218,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
}
// Field (2) 'BuilderIndex'
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[8:16]))
e.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[8:16]))
// Field (3) 'BeaconBlockRoot'
if cap(e.BeaconBlockRoot) == 0 {
@@ -3595,132 +3472,3 @@ func (s *SignedExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err e
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the Builder object
func (b *Builder) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the Builder object to a target array
func (b *Builder) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, b.Pubkey...)
// Field (1) 'Version'
if size := len(b.Version); size != 1 {
err = ssz.ErrBytesLengthFn("--.Version", size, 1)
return
}
dst = append(dst, b.Version...)
// Field (2) 'ExecutionAddress'
if size := len(b.ExecutionAddress); size != 20 {
err = ssz.ErrBytesLengthFn("--.ExecutionAddress", size, 20)
return
}
dst = append(dst, b.ExecutionAddress...)
// Field (3) 'Balance'
dst = ssz.MarshalUint64(dst, uint64(b.Balance))
// Field (4) 'DepositEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.DepositEpoch))
// Field (5) 'WithdrawableEpoch'
dst = ssz.MarshalUint64(dst, uint64(b.WithdrawableEpoch))
return
}
// UnmarshalSSZ ssz unmarshals the Builder object
func (b *Builder) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 93 {
return ssz.ErrSize
}
// Field (0) 'Pubkey'
if cap(b.Pubkey) == 0 {
b.Pubkey = make([]byte, 0, len(buf[0:48]))
}
b.Pubkey = append(b.Pubkey, buf[0:48]...)
// Field (1) 'Version'
if cap(b.Version) == 0 {
b.Version = make([]byte, 0, len(buf[48:49]))
}
b.Version = append(b.Version, buf[48:49]...)
// Field (2) 'ExecutionAddress'
if cap(b.ExecutionAddress) == 0 {
b.ExecutionAddress = make([]byte, 0, len(buf[49:69]))
}
b.ExecutionAddress = append(b.ExecutionAddress, buf[49:69]...)
// Field (3) 'Balance'
b.Balance = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[69:77]))
// Field (4) 'DepositEpoch'
b.DepositEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[77:85]))
// Field (5) 'WithdrawableEpoch'
b.WithdrawableEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[85:93]))
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Builder object
func (b *Builder) SizeSSZ() (size int) {
size = 93
return
}
// HashTreeRoot ssz hashes the Builder object
func (b *Builder) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the Builder object with a hasher
func (b *Builder) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(b.Pubkey)
// Field (1) 'Version'
if size := len(b.Version); size != 1 {
err = ssz.ErrBytesLengthFn("--.Version", size, 1)
return
}
hh.PutBytes(b.Version)
// Field (2) 'ExecutionAddress'
if size := len(b.ExecutionAddress); size != 20 {
err = ssz.ErrBytesLengthFn("--.ExecutionAddress", size, 20)
return
}
hh.PutBytes(b.ExecutionAddress)
// Field (3) 'Balance'
hh.PutUint64(uint64(b.Balance))
// Field (4) 'DepositEpoch'
hh.PutUint64(uint64(b.DepositEpoch))
// Field (5) 'WithdrawableEpoch'
hh.PutUint64(uint64(b.WithdrawableEpoch))
hh.Merkleize(indx)
return
}

View File

@@ -26,12 +26,10 @@ func TestExecutionPayloadBid_Copy(t *testing.T) {
ParentBlockHash: []byte("parent_block_hash_32_bytes_long!"),
ParentBlockRoot: []byte("parent_block_root_32_bytes_long!"),
BlockHash: []byte("block_hash_32_bytes_are_long!!"),
PrevRandao: []byte("prev_randao_32_bytes_long!!!"),
FeeRecipient: []byte("fee_recipient_20_byt"),
GasLimit: 15000000,
BuilderIndex: primitives.BuilderIndex(42),
BuilderIndex: primitives.ValidatorIndex(42),
Slot: primitives.Slot(12345),
ExecutionPayment: 5645654,
Value: 1000000000000000000,
BlobKzgCommitmentsRoot: []byte("blob_kzg_commitments_32_bytes!!"),
},
@@ -78,9 +76,10 @@ func TestBuilderPendingWithdrawal_Copy(t *testing.T) {
{
name: "fully populated withdrawal",
withdrawal: &BuilderPendingWithdrawal{
FeeRecipient: []byte("fee_recipient_20_byt"),
Amount: primitives.Gwei(5000000000),
BuilderIndex: primitives.BuilderIndex(123),
FeeRecipient: []byte("fee_recipient_20_byt"),
Amount: primitives.Gwei(5000000000),
BuilderIndex: primitives.ValidatorIndex(123),
WithdrawableEpoch: primitives.Epoch(456),
},
},
}
@@ -135,9 +134,10 @@ func TestBuilderPendingPayment_Copy(t *testing.T) {
payment: &BuilderPendingPayment{
Weight: primitives.Gwei(2500),
Withdrawal: &BuilderPendingWithdrawal{
FeeRecipient: []byte("test_recipient_20byt"),
Amount: primitives.Gwei(10000),
BuilderIndex: primitives.BuilderIndex(789),
FeeRecipient: []byte("test_recipient_20byt"),
Amount: primitives.Gwei(10000),
BuilderIndex: primitives.ValidatorIndex(789),
WithdrawableEpoch: primitives.Epoch(999),
},
},
},
@@ -166,60 +166,3 @@ func TestBuilderPendingPayment_Copy(t *testing.T) {
})
}
}
func TestCopyBuilder(t *testing.T) {
tests := []struct {
name string
builder *Builder
}{
{
name: "nil builder",
builder: nil,
},
{
name: "empty builder",
builder: &Builder{},
},
{
name: "fully populated builder",
builder: &Builder{
Pubkey: []byte("pubkey_48_bytes_long_pubkey_48_bytes_long_pubkey_48!"),
Version: []byte{'a'},
ExecutionAddress: []byte("execution_address_20"),
Balance: primitives.Gwei(12345),
DepositEpoch: primitives.Epoch(10),
WithdrawableEpoch: primitives.Epoch(20),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
copied := CopyBuilder(tt.builder)
if tt.builder == nil {
if copied != nil {
t.Errorf("CopyBuilder() of nil should return nil, got %v", copied)
}
return
}
if !reflect.DeepEqual(tt.builder, copied) {
t.Errorf("CopyBuilder() = %v, want %v", copied, tt.builder)
}
if len(tt.builder.Pubkey) > 0 {
tt.builder.Pubkey[0] = 0xFF
if copied.Pubkey[0] == 0xFF {
t.Error("CopyBuilder() did not create deep copy of Pubkey")
}
}
if len(tt.builder.ExecutionAddress) > 0 {
tt.builder.ExecutionAddress[0] = 0xFF
if copied.ExecutionAddress[0] == 0xFF {
t.Error("CopyBuilder() did not create deep copy of ExecutionAddress")
}
}
})
}
}

View File

@@ -48,7 +48,6 @@ mainnet = {
"payload_attestation.size": "4", # Gloas: MAX_PAYLOAD_ATTESTATIONS defined in block body
"execution_payload_availability.size": "1024", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "64", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
}
minimal = {
@@ -92,8 +91,7 @@ minimal = {
"ptc.type": "github.com/OffchainLabs/go-bitfield.Bitvector2",
"payload_attestation.size": "4", # Gloas: MAX_PAYLOAD_ATTESTATIONS defined in block body
"execution_payload_availability.size": "8", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "16", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
"builder_pending_payments.size": "16" # Gloas: vector length (2 * SLOTS_PER_EPOCH)
}
###### Rules definitions #######

View File

@@ -278,7 +278,6 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
"//testing/spectest/shared/phase0/finality:go_default_library",
"//testing/spectest/shared/phase0/operations:go_default_library",

View File

@@ -2,10 +2,9 @@ package mainnet
import (
"testing"
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
)
func TestMainnet_Gloas_SSZStatic(t *testing.T) {
ssz_static.RunSSZStaticTests(t, "mainnet")
t.Skip("Gloas is not implemented")
// ssz_static.RunSSZStaticTests(t, "mainnet")
}

View File

@@ -288,7 +288,6 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
"//testing/spectest/shared/phase0/finality:go_default_library",
"//testing/spectest/shared/phase0/operations:go_default_library",

View File

@@ -2,10 +2,9 @@ package minimal
import (
"testing"
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
)
func TestMinimal_Gloas_SSZStatic(t *testing.T) {
ssz_static.RunSSZStaticTests(t, "minimal")
t.Skip("Gloas is not implemented")
// ssz_static.RunSSZStaticTests(t, "minimal")
}

View File

@@ -6,7 +6,6 @@ import (
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
// enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -57,8 +56,6 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
obj = &ethpb.BeaconBlockBodyGloas{}
case "BeaconState":
obj = &ethpb.BeaconStateGloas{}
case "Builder":
obj = &ethpb.Builder{}
case "BuilderPendingPayment":
obj = &ethpb.BuilderPendingPayment{}
case "BuilderPendingWithdrawal":
@@ -73,8 +70,6 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
t.Skip("Not a consensus type")
case "DataColumnSidecar":
obj = &ethpb.DataColumnSidecarGloas{}
case "SignedProposerPreferences", "ProposerPreferences":
t.Skip("p2p-only type; not part of the consensus state transition")
// Standard types that also exist in gloas
case "ExecutionPayload":

View File

@@ -10,7 +10,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/utils",
visibility = ["//testing/spectest:__subpackages__"],
deps = [
"//config/features:go_default_library",
"//config/params:go_default_library",
"//io/file:go_default_library",
"//testing/require:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"testing"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
)
@@ -14,12 +13,6 @@ import (
// Provides reset function allowing to get back to the previous configuration at the end of a test.
func SetConfig(t testing.TB, config string) error {
params.SetupTestConfigCleanup(t)
resetFeatures := features.InitWithReset(&features.Flags{
LowValcountSweep: true,
})
t.Cleanup(resetFeatures)
switch config {
case "minimal":
params.OverrideBeaconConfig(params.MinimalSpecConfig().Copy())

View File

@@ -71,7 +71,7 @@ func main() {
flag.Parse()
if *logFileName != "" {
if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel); err != nil {
if err := logs.ConfigurePersistentLogging(*logFileName, "text"); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}

View File

@@ -71,9 +71,17 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
return
}
committeeIndex := duty.CommitteeIndex
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
if postElectra {
committeeIndex = 0
}
data, err := v.getAttestationData(ctx, slot, duty.CommitteeIndex)
req := &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
}
data, err := v.validatorClient.AttestationData(ctx, req)
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {

View File

@@ -112,9 +112,6 @@ type validator struct {
blacklistedPubkeysLock sync.RWMutex
attSelectionLock sync.Mutex
dutiesLock sync.RWMutex
attestationDataCacheLock sync.RWMutex
attestationDataCache *ethpb.AttestationData
attestationDataCacheSlot primitives.Slot
disableDutiesPolling bool
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
@@ -980,55 +977,6 @@ func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, doma
return res, nil
}
// getAttestationData fetches attestation data from the beacon node with caching for post-Electra.
// Post-Electra, attestation data is identical for all validators in the same slot (committee index is always 0),
// so we cache it to avoid redundant beacon node requests.
func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) (*ethpb.AttestationData, error) {
ctx, span := trace.StartSpan(ctx, "validator.getAttestationData")
defer span.End()
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
// Pre-Electra: no caching since committee index varies per validator
if !postElectra {
return v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
// Post-Electra: check cache first (committee index is always 0)
v.attestationDataCacheLock.RLock()
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
data := v.attestationDataCache
v.attestationDataCacheLock.RUnlock()
return data, nil
}
v.attestationDataCacheLock.RUnlock()
// Cache miss - acquire write lock and fetch
v.attestationDataCacheLock.Lock()
defer v.attestationDataCacheLock.Unlock()
// Double-check after acquiring write lock (another goroutine may have filled the cache)
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
return v.attestationDataCache, nil
}
data, err := v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: 0,
})
if err != nil {
return nil, err
}
v.attestationDataCache = data
v.attestationDataCacheSlot = slot
return data, nil
}
func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
for i := range attesterKeys {

View File

@@ -2977,207 +2977,3 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
require.NoError(t, v.checkDependentRoots(ctx, head))
})
}
func TestGetAttestationData_PreElectraNoCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Pre-Electra slot (Electra fork epoch is far in the future by default)
preElectraSlot := primitives.Slot(10)
expectedData := &ethpb.AttestationData{
Slot: preElectraSlot,
CommitteeIndex: 5,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Each call should go to the beacon node (no caching pre-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 5,
}).Return(expectedData, nil)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 7,
}).Return(expectedData, nil)
// First call with committee index 5
data1, err := v.getAttestationData(context.Background(), preElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index 7 - should still call beacon node
data2, err := v.getAttestationData(context.Background(), preElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
}
func TestGetAttestationData_PostElectraCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Post-Electra slot
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Only ONE call should go to the beacon node (caching post-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
// First call - should hit beacon node
data1, err := v.getAttestationData(context.Background(), postElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index - should use cache
data2, err := v.getAttestationData(context.Background(), postElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
// Third call - should still use cache
data3, err := v.getAttestationData(context.Background(), postElectraSlot, 10)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data3)
}
func TestGetAttestationData_PostElectraCacheInvalidatesOnNewSlot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
slot1 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
slot2 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 6)
dataSlot1 := &ethpb.AttestationData{
Slot: slot1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
dataSlot2 := &ethpb.AttestationData{
Slot: slot2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Expect one call per slot
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot1,
CommitteeIndex: 0,
}).Return(dataSlot1, nil).Times(1)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot2,
CommitteeIndex: 0,
}).Return(dataSlot2, nil).Times(1)
// First slot - should hit beacon node
data1, err := v.getAttestationData(context.Background(), slot1, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1)
// Same slot - should use cache
data1Again, err := v.getAttestationData(context.Background(), slot1, 7)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1Again)
// New slot - should invalidate cache and hit beacon node
data2, err := v.getAttestationData(context.Background(), slot2, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot2, data2)
}
func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Should only call beacon node once despite concurrent requests
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
var wg sync.WaitGroup
numGoroutines := 10
results := make([]*ethpb.AttestationData, numGoroutines)
errs := make([]error, numGoroutines)
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(idx int) {
defer wg.Done()
results[idx], errs[idx] = v.getAttestationData(context.Background(), postElectraSlot, primitives.CommitteeIndex(idx))
}(i)
}
wg.Wait()
for i := 0; i < numGoroutines; i++ {
require.NoError(t, errs[i])
require.DeepEqual(t, expectedData, results[i])
}
}

View File

@@ -75,6 +75,13 @@ func NewValidatorClient(cliCtx *cli.Context) (*ValidatorClient, error) {
return nil, err
}
verbosity := cliCtx.String(cmd.VerbosityFlag.Name)
level, err := logrus.ParseLevel(verbosity)
if err != nil {
return nil, err
}
logrus.SetLevel(level)
// Warn if user's platform is not supported
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)