Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-26 21:59:13 -05:00

Compare commits: add-proces...testing-ch (6 commits)

| Author | SHA1 | Date |
|---|---|---|
| | e31ab0a09d | |
| | 520733ba55 | |
| | bcf060619b | |
| | 37b27fdd3c | |
| | 6a9bcbab3a | |
| | 1b2524b0fe | |
@@ -10,7 +10,10 @@ go_library(
],
importpath = "github.com/OffchainLabs/prysm/v7/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
deps = [
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
@@ -8,6 +8,7 @@ import (
"net/url"

"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)

const (
@@ -28,6 +29,7 @@ type Client struct {
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
log.Info("hi")
u, err := urlForHost(host)
if err != nil {
return nil, err
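For context, a minimal (hypothetical) use of the constructor touched above; the host may be a bare `host:port`, in which case an http scheme is assumed:

```go
// Illustrative sketch only; "localhost:3500" is a placeholder endpoint.
c, err := client.NewClient("localhost:3500")
if err != nil {
	log.WithError(err).Fatal("could not create beacon API client")
}
_ = c
```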
@@ -3,7 +3,6 @@ package altair
import (
"context"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -24,7 +23,7 @@ func ProcessPreGenesisDeposits(
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
if err != nil {
return nil, err
}
@@ -37,7 +36,7 @@ func ProcessDeposits(
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, err
}
@@ -82,7 +81,7 @@ func ProcessDeposits(
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
@@ -122,7 +121,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if !allSignaturesVerified {
valid, err := blocks.IsValidDepositSignature(data)
valid, err := helpers.IsValidDepositSignature(data)
if err != nil {
return nil, err
}
@@ -5,7 +5,6 @@ go_library(
srcs = [
"attestation.go",
"attester_slashing.go",
"deposit.go",
"error.go",
"eth1_data.go",
"exit.go",
@@ -34,8 +33,6 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -62,7 +59,6 @@ go_test(
"attester_slashing_test.go",
"block_operations_fuzz_test.go",
"block_regression_test.go",
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"exports_test.go",
@@ -91,7 +87,6 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
@@ -3,6 +3,7 @@ package blocks
import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -318,7 +319,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
err = VerifyDeposit(s, deposit)
err = helpers.VerifyDeposit(s, deposit)
_ = err
fuzz.FreeMemory(i)
}
@@ -20,7 +20,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -3,7 +3,6 @@ package electra
import (
"context"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -37,7 +36,7 @@ func ProcessDeposits(
defer span.End()
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
// individual deposits with signature verification enabled.
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
}
@@ -82,7 +81,7 @@ func ProcessDeposits(
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
@@ -377,7 +376,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return nil
}

allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
if err != nil {
return errors.Wrap(err, "batch signature verification failed")
}
@@ -386,7 +385,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
validSig := allSignaturesVerified

if !allSignaturesVerified {
validSig, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
validSig, err = helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: bytesutil.SafeCopyBytes(pd.PublicKey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
Amount: pd.Amount,
@@ -441,7 +440,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
defer span.End()
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
if !ok {
verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
verified, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: bytesutil.SafeCopyBytes(deposit.PublicKey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
Amount: deposit.Amount,
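As a reading aid, a rough sketch of the batch-verify-then-fallback flow these hunks preserve while swapping `blocks.*` for `helpers.*` (surrounding code simplified, not part of the change):

```go
// Try to verify every deposit signature in one batch; if the batch check
// fails, each deposit is re-verified individually inside ProcessDeposit.
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
	return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
}
for _, d := range deposits {
	beaconState, err = ProcessDeposit(beaconState, d, allSignaturesVerified)
	if err != nil {
		return nil, errors.Wrap(err, "could not process deposit")
	}
}
return beaconState, nil
```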
@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"bid.go",
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
],
@@ -15,16 +14,12 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -35,7 +30,6 @@ go_test(
name = "go_default_test",
srcs = [
"bid_test.go",
"payload_attestation_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
@@ -46,7 +40,6 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
@@ -57,10 +50,8 @@ go_test(
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -1,253 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessPayloadAttestations validates payload attestations in a block body.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
|
||||
//
|
||||
// data = payload_attestation.data
|
||||
// assert data.beacon_block_root == state.latest_block_header.parent_root
|
||||
// assert data.slot + 1 == state.slot
|
||||
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
|
||||
// assert is_valid_indexed_payload_attestation(state, indexed)
|
||||
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
atts, err := body.PayloadAttestations()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get payload attestations from block body")
|
||||
}
|
||||
if len(atts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
|
||||
for i, att := range atts {
|
||||
data := att.Data
|
||||
if !bytes.Equal(data.BeaconBlockRoot, header.ParentRoot) {
|
||||
return fmt.Errorf("payload attestation %d has wrong parent: got %x want %x", i, data.BeaconBlockRoot, header.ParentRoot)
|
||||
}
|
||||
|
||||
dataSlot, err := data.Slot.SafeAdd(1)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "payload attestation %d has invalid slot addition", i)
|
||||
}
|
||||
if dataSlot != st.Slot() {
|
||||
return fmt.Errorf("payload attestation %d has wrong slot: got %d want %d", i, data.Slot+1, st.Slot())
|
||||
}
|
||||
|
||||
indexed, err := indexedPayloadAttestation(ctx, st, att)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "payload attestation %d failed to convert to indexed form", i)
|
||||
}
|
||||
if err := validIndexedPayloadAttestation(st, indexed); err != nil {
|
||||
return errors.Wrapf(err, "payload attestation %d failed to verify indexed form", i)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// indexedPayloadAttestation converts a payload attestation into its indexed form.
|
||||
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
|
||||
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices := make([]primitives.ValidatorIndex, 0, len(committee))
|
||||
for i, idx := range committee {
|
||||
if att.AggregationBits.BitAt(uint64(i)) {
|
||||
indices = append(indices, idx)
|
||||
}
|
||||
}
|
||||
slices.Sort(indices)
|
||||
|
||||
return &consensus_types.IndexedPayloadAttestation{
|
||||
AttestingIndices: indices,
|
||||
Data: att.Data,
|
||||
Signature: att.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
||||
//
|
||||
// epoch = compute_epoch_at_slot(slot)
|
||||
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
|
||||
// indices = []
|
||||
// committees_per_slot = get_committee_count_per_slot(state, epoch)
|
||||
// for i in range(committees_per_slot):
|
||||
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
|
||||
// indices.extend(committee)
|
||||
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
|
||||
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
seed, err := ptcSeed(st, epoch, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
activeCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
|
||||
out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
|
||||
}
|
||||
out = append(out, committee...)
|
||||
}
|
||||
|
||||
return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
|
||||
}
|
||||
|
||||
// ptcSeed computes the seed for the payload timeliness committee.
|
||||
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
|
||||
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return hash.Hash(append(seed[:], bytesutil.Bytes8(uint64(slot))...)), nil
|
||||
}
|
||||
|
||||
// selectByBalance selects a balance-weighted subset of input candidates.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
|
||||
// Note: shuffle_indices is false for PTC.
|
||||
//
|
||||
// total = len(indices); selected = []; i = 0
|
||||
// while len(selected) < size:
|
||||
// next = i % total
|
||||
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
|
||||
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
|
||||
// selected.append(indices[next])
|
||||
// i += 1
|
||||
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
|
||||
if len(candidates) == 0 {
|
||||
return nil, errors.New("no candidates for balance weighted selection")
|
||||
}
|
||||
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
|
||||
var buf [40]byte
|
||||
copy(buf[:], seed[:])
|
||||
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
|
||||
selected := make([]primitives.ValidatorIndex, 0, count)
|
||||
total := uint64(len(candidates))
|
||||
for i := uint64(0); uint64(len(selected)) < count; i++ {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
idx := candidates[i%total]
|
||||
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
selected = append(selected, idx)
|
||||
}
|
||||
}
|
||||
return selected, nil
|
||||
}
|
||||
|
||||
// acceptByBalance determines if a validator is accepted based on its effective balance.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// compute_balance_weighted_acceptance(state, index, seed, i):
|
||||
//
|
||||
// MAX_RANDOM_VALUE = 2**16 - 1
|
||||
// random_bytes = hash(seed + uint_to_bytes(i // 16))
|
||||
// offset = i % 16 * 2
|
||||
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
|
||||
// effective_balance = state.validators[index].effective_balance
|
||||
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
|
||||
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
|
||||
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
|
||||
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
|
||||
random := hashFunc(seedBuf)
|
||||
offset := (round % 16) * 2
|
||||
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
|
||||
|
||||
val, err := st.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "validator %d", idx)
|
||||
}
|
||||
|
||||
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
||||
}
|
||||
|
||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
|
||||
//
|
||||
// indices = indexed_payload_attestation.attesting_indices
|
||||
// return len(indices) > 0 and indices == sorted(indices) and
|
||||
// bls.FastAggregateVerify(
|
||||
// [state.validators[i].pubkey for i in indices],
|
||||
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
|
||||
// indexed_payload_attestation.signature,
|
||||
// )
|
||||
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
|
||||
indices := att.AttestingIndices
|
||||
if len(indices) == 0 || !slices.IsSorted(indices) {
|
||||
return errors.New("attesting indices empty or unsorted")
|
||||
}
|
||||
|
||||
pubkeys := make([]bls.PublicKey, len(indices))
|
||||
for i, idx := range indices {
|
||||
val, err := st.ValidatorAtIndexReadOnly(idx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "validator %d", idx)
|
||||
}
|
||||
keyBytes := val.PublicKey()
|
||||
key, err := bls.PublicKeyFromBytes(keyBytes[:])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "pubkey %d", idx)
|
||||
}
|
||||
pubkeys[i] = key
|
||||
}
|
||||
|
||||
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(att.Data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := signing.ComputeSigningRoot(att.Data, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(att.Signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !sig.FastAggregateVerify(pubkeys, root) {
|
||||
return errors.New("invalid signature")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,305 +0,0 @@
|
||||
package gloas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
testutil "github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
func TestProcessPayloadAttestations_WrongParent(t *testing.T) {
|
||||
setupTestConfig(t)
|
||||
|
||||
_, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
require.NoError(t, st.SetSlot(2))
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
att := ð.PayloadAttestation{
|
||||
Data: ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xbb}, 32),
|
||||
Slot: 1,
|
||||
},
|
||||
AggregationBits: bitfield.NewBitvector512(),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.ErrorContains(t, "wrong parent", err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_WrongSlot(t *testing.T) {
|
||||
setupTestConfig(t)
|
||||
|
||||
_, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
require.NoError(t, st.SetSlot(3))
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
att := ð.PayloadAttestation{
|
||||
Data: ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
},
|
||||
AggregationBits: bitfield.NewBitvector512(),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.ErrorContains(t, "wrong slot", err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_InvalidSignature(t *testing.T) {
|
||||
setupTestConfig(t)
|
||||
|
||||
_, pk1 := newKey(t)
|
||||
sk2, pk2 := newKey(t)
|
||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
||||
st := newTestState(t, vals, 2)
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
attData := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
}
|
||||
att := ð.PayloadAttestation{
|
||||
Data: attData,
|
||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
||||
Signature: signAttestation(t, st, attData, []common.SecretKey{sk2}),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
||||
require.ErrorContains(t, "invalid signature", err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
|
||||
setupTestConfig(t)
|
||||
|
||||
_, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
require.NoError(t, st.SetSlot(2))
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
attData := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
}
|
||||
att := ð.PayloadAttestation{
|
||||
Data: attData,
|
||||
AggregationBits: bitfield.NewBitvector512(),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
||||
require.ErrorContains(t, "attesting indices empty or unsorted", err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
setupTestConfig(t)
|
||||
|
||||
sk1, pk1 := newKey(t)
|
||||
sk2, pk2 := newKey(t)
|
||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
||||
|
||||
st := newTestState(t, vals, 2)
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
attData := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
}
|
||||
aggBits := bitfield.NewBitvector512()
|
||||
aggBits.SetBitAt(0, true)
|
||||
aggBits.SetBitAt(1, true)
|
||||
|
||||
att := ð.PayloadAttestation{
|
||||
Data: attData,
|
||||
AggregationBits: aggBits,
|
||||
Signature: signAttestation(t, st, attData, []common.SecretKey{sk1, sk2}),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
setupTestConfig(t)
|
||||
|
||||
sk1, pk1 := newKey(t)
|
||||
sk2, pk2 := newKey(t)
|
||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
||||
|
||||
st := newTestState(t, vals, 2)
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
attData1 := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
}
|
||||
attData2 := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 1,
|
||||
}
|
||||
|
||||
att1 := ð.PayloadAttestation{
|
||||
Data: attData1,
|
||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
||||
Signature: signAttestation(t, st, attData1, []common.SecretKey{sk1}),
|
||||
}
|
||||
att2 := ð.PayloadAttestation{
|
||||
Data: attData2,
|
||||
AggregationBits: setBits(bitfield.NewBitvector512(), 1),
|
||||
Signature: signAttestation(t, st, attData2, []common.SecretKey{sk2}),
|
||||
}
|
||||
|
||||
body := buildBody(t, att1, att2)
|
||||
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
|
||||
setupTestConfig(t)
|
||||
|
||||
_, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
||||
|
||||
attData := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: parentRoot,
|
||||
Slot: 0,
|
||||
}
|
||||
att := ð.PayloadAttestation{
|
||||
Data: attData,
|
||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
body := buildBody(t, att)
|
||||
|
||||
errState := &validatorLookupErrState{
|
||||
BeaconState: st,
|
||||
errIndex: 0,
|
||||
}
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
|
||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
||||
require.ErrorContains(t, "validator 0", err)
|
||||
}
|
||||
|
||||
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
for _, v := range vals {
|
||||
require.NoError(t, st.AppendValidator(v))
|
||||
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
|
||||
}
|
||||
require.NoError(t, st.SetSlot(slot))
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
|
||||
return st
|
||||
}
|
||||
|
||||
func setupTestConfig(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SlotsPerEpoch = 1
|
||||
cfg.MaxEffectiveBalanceElectra = cfg.MaxEffectiveBalance
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
|
||||
func buildBody(t *testing.T, atts ...*eth.PayloadAttestation) interfaces.ReadOnlyBeaconBlockBody {
|
||||
body := ð.BeaconBlockBodyGloas{
|
||||
PayloadAttestations: atts,
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ð.Eth1Data{},
|
||||
Graffiti: make([]byte, 32),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{},
|
||||
AttesterSlashings: []*eth.AttesterSlashingElectra{},
|
||||
Attestations: []*eth.AttestationElectra{},
|
||||
Deposits: []*eth.Deposit{},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{},
|
||||
SyncAggregate: ð.SyncAggregate{},
|
||||
BlsToExecutionChanges: []*eth.SignedBLSToExecutionChange{},
|
||||
}
|
||||
wrapped, err := blocks.NewBeaconBlockBody(body)
|
||||
require.NoError(t, err)
|
||||
return wrapped
|
||||
}
|
||||
|
||||
func setBits(bits bitfield.Bitvector512, idx uint64) bitfield.Bitvector512 {
|
||||
bits.SetBitAt(idx, true)
|
||||
return bits
|
||||
}
|
||||
|
||||
func activeValidator(pub []byte) *eth.Validator {
|
||||
return ð.Validator{
|
||||
PublicKey: pub,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ActivationEligibilityEpoch: 0,
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
func newKey(t *testing.T) (common.SecretKey, []byte) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
return sk, sk.PublicKey().Marshal()
|
||||
}
|
||||
|
||||
func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.PayloadAttestationData, sks []common.SecretKey) []byte {
|
||||
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(data, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
sigs := make([]common.Signature, len(sks))
|
||||
for i, sk := range sks {
|
||||
sigs[i] = sk.Sign(root[:])
|
||||
}
|
||||
agg := bls.AggregateSignatures(sigs)
|
||||
return agg.Marshal()
|
||||
}
|
||||
|
||||
type validatorLookupErrState struct {
|
||||
state.BeaconState
|
||||
errIndex primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
// ValidatorAtIndexReadOnly is overridden to simulate a missing validator lookup.
|
||||
func (s *validatorLookupErrState) ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
|
||||
if idx == s.errIndex {
|
||||
return nil, state.ErrNilValidatorsInState
|
||||
}
|
||||
return s.BeaconState.ValidatorAtIndexReadOnly(idx)
|
||||
}
|
||||
@@ -6,6 +6,7 @@ go_library(
"attestation.go",
"beacon_committee.go",
"block.go",
"deposit.go",
"genesis.go",
"legacy.go",
"log.go",
@@ -23,6 +24,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -31,6 +33,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -54,6 +57,7 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"deposit_test.go",
"legacy_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
@@ -72,6 +76,7 @@ go_test(
tags = ["CI_race_detection"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -80,6 +85,8 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -1,4 +1,4 @@
package blocks
package helpers

import (
"context"
@@ -1,9 +1,9 @@
package blocks_test
package helpers_test

import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {

deposit.Proof = proof
require.NoError(t, err)
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, true, verified)
}
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {

deposit.Proof = proof
require.NoError(t, err)
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, false, verified)
}
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
})
require.NoError(t, err)
want := "deposit root did not verify"
err = blocks.VerifyDeposit(beaconState, deposit)
err = helpers.VerifyDeposit(beaconState, deposit)
require.ErrorContains(t, want, err)
}

@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
require.NoError(t, err)
sig := sk.Sign(sr[:])
depositData.Signature = sig.Marshal()
valid, err := blocks.IsValidDepositSignature(depositData)
valid, err := helpers.IsValidDepositSignature(depositData)
require.NoError(t, err)
require.Equal(t, true, valid)
}
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
sig2 := sk2.Sign(sr2[:])
pendingDeposit2.Signature = sig2.Marshal()

verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
require.NoError(t, err)
require.Equal(t, true, verified)
}
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
}
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
require.NoError(t, err)
require.Equal(t, false, verified)
}
@@ -8,7 +8,6 @@ import (
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/OffchainLabs/prysm/v7/api"
@@ -26,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -1044,112 +1042,27 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
defer span.End()

var err error
var root []byte
blockID := r.PathValue("block_id")
if blockID == "" {
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
return
}
switch blockID {
case "head":
root, err = s.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
return
}
if root == nil {
httputil.HandleError(w, "No head root was found", http.StatusNotFound)
return
}
case "finalized":
finalized := s.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := s.BeaconDB.GenesisBlock(ctx)
if err != nil {
httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
root = blkRoot[:]
default:
isHex := strings.HasPrefix(blockID, "0x")
if isHex {
blockIDBytes, err := hexutil.Decode(blockID)
if err != nil {
httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
return
}
if len(blockIDBytes) != fieldparams.RootLength {
httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
return
}
blockID32 := bytesutil.ToBytes32(blockIDBytes)
blk, err := s.BeaconDB.Block(ctx, blockID32)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
return
}
root = blockIDBytes
} else {
slot, err := strconv.ParseUint(blockID, 10, 64)
if err != nil {
httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
return
}
hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
return
}

if !hasRoots {
httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
return
}
root = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
return
}
if canonical {
root = blockRoot[:]
break
}
}
}
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
if !shared.WriteBlockRootFetchError(w, err) {
return
}

b32Root := bytesutil.ToBytes32(root)
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
return
}
response := &structs.BlockRootResponse{
Data: &structs.BlockRoot{
Root: hexutil.Encode(root),
Root: hexutil.Encode(root[:]),
},
ExecutionOptimistic: isOptimistic,
Finalized: s.FinalizationFetcher.IsFinalized(ctx, b32Root),
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
}
httputil.WriteJson(w, response)
}
@@ -2509,6 +2509,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}

root, err := genBlk.Block.HashTreeRoot()
@@ -2524,7 +2528,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "bad formatting",
blockID: map[string]string{"block_id": "3bad0"},
wantErr: "Could not parse block ID",
wantErr: "Invalid block ID",
wantCode: http.StatusBadRequest,
},
{
@@ -2572,7 +2576,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "non-existent root",
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
wantErr: "Could not find block",
wantErr: "Block not found",
wantCode: http.StatusNotFound,
},
{
@@ -2585,7 +2589,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
{
name: "no block",
blockID: map[string]string{"block_id": "105"},
wantErr: "Could not find any blocks with given slot",
wantErr: "Block not found",
wantCode: http.StatusNotFound,
},
}
@@ -2633,6 +2637,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}

request := httptest.NewRequest(http.MethodGet, url, nil)
@@ -2668,6 +2676,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
Blocker: &lookup.BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
},
}
t.Run("true", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, url, nil)
@@ -197,9 +197,6 @@ func TestGetSpec(t *testing.T) {
var dbb [4]byte
copy(dbb[:], []byte{'0', '0', '0', '8'})
config.DomainBeaconBuilder = dbb
var dptc [4]byte
copy(dptc[:], []byte{'0', '0', '0', '8'})
config.DomainPTCAttester = dptc
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
@@ -215,7 +212,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 187, len(data))
assert.Equal(t, 186, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -417,8 +414,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x30303036", v)
case "DOMAIN_AGGREGATE_AND_PROOF":
assert.Equal(t, "0x30303037", v)
case "DOMAIN_PTC_ATTESTER":
assert.Equal(t, "0x30303038", v)
case "DOMAIN_APPLICATION_MASK":
assert.Equal(t, "0x31303030", v)
case "DOMAIN_SYNC_COMMITTEE":
@@ -26,21 +26,30 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
// writeBlockIdError handles common block ID lookup errors.
// Returns true if an error was handled and written to the response, false if no error.
func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
if err == nil {
return false
}
var blockNotFoundErr *lookup.BlockNotFoundError
if errors.As(err, &blockNotFoundErr) {
httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
return false
return true
}
var invalidBlockIdErr *lookup.BlockIdParseError
if errors.As(err, &invalidBlockIdErr) {
httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
return false
return true
}
if err != nil {
httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
httputil.HandleError(w, fallbackMsg+": "+err.Error(), http.StatusInternalServerError)
return true
}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
if writeBlockIdError(w, err, "Could not get block from block ID") {
return false
}
if err = blocks.BeaconBlockIsNil(blk); err != nil {
@@ -49,3 +58,10 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
}
return true
}

// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block root.
// Returns true if no error occurred, false otherwise.
func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
return !writeBlockIdError(w, err, "Could not get block root from block ID")
}
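An illustrative caller of the new helper, mirroring the handler change above (hypothetical handler body, not part of the diff):

```go
// Resolve the block ID and let the shared helper write the 400/404/500 response.
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
if !shared.WriteBlockRootFetchError(w, err) {
	return
}
_ = root // use the resolved root to build the response
```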
@@ -105,3 +105,59 @@ func TestWriteBlockFetchError(t *testing.T) {
})
}
}

// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
// to ensure that the correct error message and code are written to the response
// and that the function returns the correct boolean value.
func TestWriteBlockRootFetchError(t *testing.T) {
cases := []struct {
name string
err error
expectedMessage string
expectedCode int
expectedReturn bool
}{
{
name: "Nil error should return true",
err: nil,
expectedReturn: true,
},
{
name: "BlockNotFoundError should return 404",
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
expectedMessage: "Block not found",
expectedCode: http.StatusNotFound,
expectedReturn: false,
},
{
name: "BlockIdParseError should return 400",
err: &lookup.BlockIdParseError{},
expectedMessage: "Invalid block ID",
expectedCode: http.StatusBadRequest,
expectedReturn: false,
},
{
name: "Generic error should return 500",
err: errors.New("database connection failed"),
expectedMessage: "Could not get block root from block ID",
expectedCode: http.StatusInternalServerError,
expectedReturn: false,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
writer := httptest.NewRecorder()
result := WriteBlockRootFetchError(writer, c.err)

assert.Equal(t, c.expectedReturn, result, "incorrect return value")
if !c.expectedReturn {
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")

e := &httputil.DefaultJsonError{}
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
}
})
}
}
@@ -60,6 +60,7 @@ func (e BlockIdParseError) Error() string {
// Blocker is responsible for retrieving blocks.
type Blocker interface {
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
@@ -225,6 +226,18 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
return blk, nil
}

// BlockRoot returns the block root for a given identifier. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
// - "justified"
// - <slot>
// - <hex encoded block root with '0x' prefix>
func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
root, _, err := p.resolveBlockID(ctx, string(id))
return root, err
}

// blobsContext holds common information needed for blob retrieval
type blobsContext struct {
root [fieldparams.RootLength]byte
@@ -168,6 +168,111 @@ func TestGetBlock(t *testing.T) {
}
}

func TestBlockRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()

genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)

for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]

wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)

fetcher := &BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: &mockChain.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[64].BlockRoot},
CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[32].BlockRoot},
CanonicalRoots: canonicalRoots,
},
}

genesisRoot, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)

tests := []struct {
name string
blockID []byte
want [32]byte
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: bytesutil.ToBytes32(headBlock.BlockRoot),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
},
{
name: "justified",
blockID: []byte("justified"),
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genesisRoot,
},
{
name: "genesis root",
blockID: genesisRoot[:],
want: genesisRoot,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
},
{
name: "hex root",
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block at slot",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := fetcher.BlockRoot(ctx, tt.blockID)
if tt.wantErr {
assert.NotEqual(t, err, nil, "no error has been returned")
return
}
require.NoError(t, err)
assert.DeepEqual(t, tt.want, result)
})
}
}

func TestBlobsErrorHandling(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -15,6 +15,7 @@ import (
// MockBlocker is a fake implementation of lookup.Blocker.
type MockBlocker struct {
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
RootToReturn [32]byte
ErrorToReturn error
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
@@ -39,6 +40,14 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
}

// BlockRoot --
func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
if m.ErrorToReturn != nil {
return [32]byte{}, m.ErrorToReturn
}
return m.RootToReturn, nil
}

// BlobSidecars --
func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
return nil, &core.RpcError{}
@@ -113,77 +113,100 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
	defer b.lock.RUnlock()

	withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
	validatorIndex := b.nextWithdrawalValidatorIndex
	withdrawalIndex := b.nextWithdrawalIndex
	epoch := slots.ToEpoch(b.slot)

	// Electra partial withdrawals functionality.
	var processedPartialWithdrawalsCount uint64
	if b.version >= version.Electra {
		for _, w := range b.pendingPartialWithdrawals {
			if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
				break
			}

			v, err := b.validatorAtIndexReadOnly(w.Index)
			if err != nil {
				return nil, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
			}
			vBal, err := b.balanceAtIndex(w.Index)
			if err != nil {
				return nil, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
			}
			hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
			var totalWithdrawn uint64
			for _, wi := range withdrawals {
				if wi.ValidatorIndex == w.Index {
					totalWithdrawn += wi.Amount
				}
			}
			balance, err := mathutil.Sub64(vBal, totalWithdrawn)
			if err != nil {
				return nil, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
			}
			hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
			if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
				amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
				withdrawals = append(withdrawals, &enginev1.Withdrawal{
					Index:          withdrawalIndex,
					ValidatorIndex: w.Index,
					Address:        v.GetWithdrawalCredentials()[12:],
					Amount:         amount,
				})
				withdrawalIndex++
			}
			processedPartialWithdrawalsCount++
		}
	withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return nil, 0, err
	}

	err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return nil, 0, err
	}

	return withdrawals, processedPartialWithdrawalsCount, nil
}

func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
	if b.version < version.Electra {
		return withdrawalIndex, 0, nil
	}

	ws := *withdrawals
	epoch := slots.ToEpoch(b.slot)
	var processedPartialWithdrawalsCount uint64
	for _, w := range b.pendingPartialWithdrawals {
		if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
			break
		}

		v, err := b.validatorAtIndexReadOnly(w.Index)
		if err != nil {
			return withdrawalIndex, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
		}
		vBal, err := b.balanceAtIndex(w.Index)
		if err != nil {
			return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
		}
		hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
		var totalWithdrawn uint64
		for _, wi := range ws {
			if wi.ValidatorIndex == w.Index {
				totalWithdrawn += wi.Amount
			}
		}
		balance, err := mathutil.Sub64(vBal, totalWithdrawn)
		if err != nil {
			return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
		}
		hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
		if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
			amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
			ws = append(ws, &enginev1.Withdrawal{
				Index:          withdrawalIndex,
				ValidatorIndex: w.Index,
				Address:        v.GetWithdrawalCredentials()[12:],
				Amount:         amount,
			})
			withdrawalIndex++
		}
		processedPartialWithdrawalsCount++
	}

	*withdrawals = ws
	return withdrawalIndex, processedPartialWithdrawalsCount, nil
}

func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) error {
	ws := *withdrawals
	validatorIndex := b.nextWithdrawalValidatorIndex
	validatorsLen := b.validatorsLen()
	epoch := slots.ToEpoch(b.slot)
	bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
	for range bound {
		val, err := b.validatorAtIndexReadOnly(validatorIndex)
		if err != nil {
			return nil, 0, errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
			return errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
		}
		balance, err := b.balanceAtIndex(validatorIndex)
		if err != nil {
			return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
			return errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
		}
		if b.version >= version.Electra {
			var partiallyWithdrawnBalance uint64
			for _, w := range withdrawals {
			for _, w := range ws {
				if w.ValidatorIndex == validatorIndex {
					partiallyWithdrawnBalance += w.Amount
				}
			}
			balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
			if err != nil {
				return nil, 0, errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
				return errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
			}
		}
		if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
			withdrawals = append(withdrawals, &enginev1.Withdrawal{
			ws = append(ws, &enginev1.Withdrawal{
				Index:          withdrawalIndex,
				ValidatorIndex: validatorIndex,
				Address:        bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
@@ -191,7 +214,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
			})
			withdrawalIndex++
		} else if helpers.IsPartiallyWithdrawableValidator(val, balance, epoch, b.version) {
			withdrawals = append(withdrawals, &enginev1.Withdrawal{
			ws = append(ws, &enginev1.Withdrawal{
				Index:          withdrawalIndex,
				ValidatorIndex: validatorIndex,
				Address:        bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
@@ -199,7 +222,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
			})
			withdrawalIndex++
		}
		if uint64(len(withdrawals)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
		if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
			break
		}
		validatorIndex += 1
@@ -208,7 +231,8 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
	}
	}

	return withdrawals, processedPartialWithdrawalsCount, nil
	*withdrawals = ws
	return nil
}

func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {

@@ -16,7 +16,7 @@ go_library(
        "//beacon-chain/state:__subpackages__",
    ],
    deps = [
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",

@@ -3,7 +3,7 @@ package testing
import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -31,7 +31,7 @@ func GeneratePendingDeposit(t *testing.T, key common.SecretKey, amount uint64, w
		Amount:    dm.Amount,
		Signature: sig.Marshal(),
	}
	valid, err := blocks.IsValidDepositSignature(depositData)
	valid, err := helpers.IsValidDepositSignature(depositData)
	require.NoError(t, err)
	require.Equal(t, true, valid)
	return &ethpb.PendingDeposit{

changelog/bastin_gloas-new-state-test.md (new file)
@@ -0,0 +1,3 @@
### Ignored

- Add `NewBeaconStateGloas()`.
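For illustration, a minimal sketch of how the new helper can be used (mirroring the TestNewBeaconStateGloas round-trip further down in this comparison; the option closure shown here is hypothetical):

	// Build a minimal Gloas state, optionally mutating the seed proto first.
	st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
		s.Slot = 1 // hypothetical tweak; any field of the seed proto can be set here
		return nil
	})
	require.NoError(t, err)
	b, err := st.MarshalSSZ()
	require.NoError(t, err)
	_ = b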
changelog/bastin_per-package-verbosity.md (new file)
@@ -0,0 +1,3 @@
### Added

- Flag `--log.vmodule` to set per-package verbosity levels for logging.
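For example, an operator might combine a quiet default with extra detail for selected packages, e.g. --verbosity=info --log.vmodule=beacon-chain/p2p=debug,beacon-chain/sync=trace (package paths here are illustrative). A minimal sketch of how such a value is interpreted by the parser added below in cmd/helpers.go:

	// Sketch: ParseVModule returns the per-package levels plus the highest level requested.
	vmodule, maxLevel, err := cmd.ParseVModule("beacon-chain/p2p=debug,beacon-chain/sync=trace")
	if err != nil {
		log.Fatal(err) // illustrative error handling
	}
	fmt.Println(vmodule["beacon-chain/p2p"], maxLevel) // debug, trace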
changelog/james-prysm_blocker-for-block-root.md (new file)
@@ -0,0 +1,3 @@
### Ignored

- Optimize the `/eth/v1/beacon/blocks/{block_id}/root` endpoint by reusing the blocker lookup instead of duplicating its logic.
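A rough sketch of the reuse this enables in the HTTP handler (the handler shape, routing, and response encoding here are assumptions for illustration; only Blocker.BlockRoot, exercised by the test earlier in this comparison, is taken from the diff):

	// Illustrative only: resolve {block_id} through the shared blocker instead of
	// re-implementing slot/root/keyword parsing inside the endpoint.
	func (s *Server) getBlockRoot(w http.ResponseWriter, r *http.Request) {
		blockID := r.PathValue("block_id") // routing detail assumed (Go 1.22 net/http patterns)
		root, err := s.Blocker.BlockRoot(r.Context(), []byte(blockID))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		_ = json.NewEncoder(w).Encode(map[string]any{"data": map[string]string{"root": hexutil.Encode(root[:])}})
	}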
changelog/terencechain_move-deposit-to-helpers.md (new file)
@@ -0,0 +1,2 @@
### Ignored
- Move deposit helpers from `beacon-chain/core/blocks` to `beacon-chain/core/helpers` (refactor only).
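In practice the change is an import swap at call sites, as the hunks elsewhere in this comparison show; a representative before/after:

	// Before: deposit helpers lived in beacon-chain/core/blocks.
	// valid, err := blocks.IsValidDepositSignature(depositData)

	// After: the same function is now imported from beacon-chain/core/helpers.
	valid, err := helpers.IsValidDepositSignature(depositData)
	if err != nil {
		return errors.Wrap(err, "could not validate deposit signature") // illustrative error handling
	}
	_ = valid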
changelog/terencechain_refactor-expected-withdrawals.md (new file)
@@ -0,0 +1,2 @@
### Ignored
- Refactor expected withdrawals into reusable helpers for future forks.
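Concretely, ExpectedWithdrawals now composes the two helpers introduced in the hunk above, roughly as follows (condensed from that diff):

	// Electra pending partial withdrawals first, then the regular validator sweep;
	// both helpers append into the same slice via the pointer argument.
	withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(b.nextWithdrawalIndex, &withdrawals)
	if err != nil {
		return nil, 0, err
	}
	if err := b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals); err != nil {
		return nil, 0, err
	}
	return withdrawals, processedPartialWithdrawalsCount, nil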
@@ -1,3 +0,0 @@
### Added

- Add Gloas process payload attestation

@@ -42,6 +42,7 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],

@@ -8,6 +8,7 @@ import (
	"os"
	"path/filepath"
	runtimeDebug "runtime/debug"
	"strings"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/node"
@@ -113,6 +114,7 @@ var appFlags = []cli.Flag{
	cmd.PubsubQueueSize,
	cmd.DataDirFlag,
	cmd.VerbosityFlag,
	cmd.LogVModuleFlag,
	cmd.EnableTracingFlag,
	cmd.TracingProcessNameFlag,
	cmd.TracingEndpointFlag,
@@ -171,13 +173,22 @@ func before(ctx *cli.Context) error {
		return errors.Wrap(err, "failed to load flags from config file")
	}

	// determine log verbosity
	// determine default log verbosity
	verbosity := ctx.String(cmd.VerbosityFlag.Name)
	verbosityLevel, err := logrus.ParseLevel(verbosity)
	if err != nil {
		return errors.Wrap(err, "failed to parse log verbosity")
	}
	logs.SetLoggingLevel(verbosityLevel)

	// determine per package verbosity. if not set, maxLevel will be 0.
	vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
	vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
	if err != nil {
		return errors.Wrap(err, "failed to parse log vmodule")
	}

	// set the global logging level to allow for the highest verbosity requested
	logs.SetLoggingLevel(max(verbosityLevel, maxLevel))

	format := ctx.String(cmd.LogFormat.Name)
	switch format {
@@ -191,11 +202,13 @@ func before(ctx *cli.Context) error {
		formatter.FullTimestamp = true
		formatter.ForceFormatting = true
		formatter.ForceColors = true
		formatter.VModule = vmodule
		formatter.BaseVerbosity = verbosityLevel

		logrus.AddHook(&logs.WriterHook{
			Formatter:     formatter,
			Writer:        os.Stderr,
			AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
			AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
		})
	case "fluentd":
		f := joonix.NewFormatter()
@@ -219,7 +232,7 @@ func before(ctx *cli.Context) error {

	logFileName := ctx.String(cmd.LogFileName.Name)
	if logFileName != "" {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
		if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}

@@ -200,6 +200,7 @@ var appHelpFlagGroups = []flagGroup{
			cmd.LogFileName,
			cmd.VerbosityFlag,
			flags.DisableEphemeralLogFile,
			cmd.LogVModuleFlag,
		},
	},
	{ // Feature flags.

@@ -86,7 +86,7 @@ func main() {

	logFileName := ctx.String(cmd.LogFileName.Name)
	if logFileName != "" {
		if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
		if err := logs.ConfigurePersistentLogging(logFileName, format, level, map[string]logrus.Level{}); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}

@@ -36,6 +36,11 @@ var (
		Usage: "Logging verbosity. (trace, debug, info, warn, error, fatal, panic)",
		Value: "info",
	}
	// LogVModuleFlag defines per-package log levels.
	LogVModuleFlag = &cli.StringSliceFlag{
		Name:  "log.vmodule",
		Usage: "Per-package log verbosity. packagePath=level entries separated by commas.",
	}
	// DataDirFlag defines a path on disk where Prysm databases are stored.
	DataDirFlag = &cli.StringFlag{
		Name: "datadir",

@@ -6,6 +6,7 @@ import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

@@ -237,3 +238,41 @@ func TestValidateNoArgs_SubcommandFlags(t *testing.T) {
	err = app.Run([]string{"command", "bar", "subComm2", "--barfoo100", "garbage", "subComm4"})
	require.ErrorContains(t, "unrecognized argument: garbage", err)
}

func TestParseVModule(t *testing.T) {
	t.Run("valid", func(t *testing.T) {
		input := "beacon-chain/p2p=error, beacon-chain/light-client=trace"
		parsed, maxL, err := ParseVModule(input)
		require.NoError(t, err)
		require.Equal(t, logrus.ErrorLevel, parsed["beacon-chain/p2p"])
		require.Equal(t, logrus.TraceLevel, parsed["beacon-chain/light-client"])
		require.Equal(t, logrus.TraceLevel, maxL)
	})

	t.Run("empty", func(t *testing.T) {
		parsed, maxL, err := ParseVModule(" ")
		require.NoError(t, err)
		require.IsNil(t, parsed)
		require.Equal(t, logrus.PanicLevel, maxL)
	})

	t.Run("invalid", func(t *testing.T) {
		tests := []string{
			"beacon-chain/p2p",
			"beacon-chain/p2p=",
			"beacon-chain/p2p/=error",
			"=info",
			"beacon-chain/*=info",
			"beacon-chain/p2p=meow",
			"/beacon-chain/p2p=info",
			"beacon-chain/../p2p=info",
			"beacon-chain/p2p=info,",
			"beacon-chain/p2p=info,beacon-chain/p2p=debug",
		}
		for _, input := range tests {
			_, maxL, err := ParseVModule(input)
			require.ErrorContains(t, "invalid", err)
			require.Equal(t, logrus.PanicLevel, maxL)
		}
	})
}

@@ -4,6 +4,7 @@ import (
	"bufio"
	"fmt"
	"os"
	"path"
	"runtime"
	"strings"

@@ -102,3 +103,63 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
	}
	return nil
}

// ParseVModule parses a comma-separated list of package=level entries.
func ParseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
	var l logrus.Level
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return nil, l, nil
	}

	parts := strings.Split(trimmed, ",")
	result := make(map[string]logrus.Level, len(parts))
	for _, raw := range parts {
		entry := strings.TrimSpace(raw)
		if entry == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry: empty segment")
		}
		kv := strings.Split(entry, "=")
		if len(kv) != 2 {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: expected path=level", entry)
		}
		pkg := strings.TrimSpace(kv[0])
		levelText := strings.TrimSpace(kv[1])
		if pkg == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: empty package path", entry)
		}
		if levelText == "" {
			return nil, l, fmt.Errorf("invalid vmodule entry %q: empty level", entry)
		}
		if strings.Contains(pkg, "*") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: wildcards are not allowed", pkg)
		}
		if strings.ContainsAny(pkg, " \t\n") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: whitespace is not allowed", pkg)
		}
		if strings.HasPrefix(pkg, "/") {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: leading slash is not allowed", pkg)
		}
		cleaned := path.Clean(pkg)
		if cleaned != pkg || pkg == "." || pkg == ".." {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: must be an absolute package path. (trailing slash not allowed)", pkg)
		}
		if _, exists := result[pkg]; exists {
			return nil, l, fmt.Errorf("invalid vmodule package path %q: duplicate entry", pkg)
		}
		level, err := logrus.ParseLevel(levelText)
		if err != nil {
			return nil, l, fmt.Errorf("invalid vmodule level %q: must be one of panic, fatal, error, warn, info, debug, trace", levelText)
		}
		result[pkg] = level
	}

	maxLevel := logrus.PanicLevel
	for _, lvl := range result {
		if lvl > maxLevel {
			maxLevel = lvl
		}
	}

	return result, maxLevel, nil
}

@@ -9,6 +9,7 @@ import (
	"os"
	"path/filepath"
	runtimeDebug "runtime/debug"
	"strings"

	"github.com/OffchainLabs/prysm/v7/cmd"
	accountcommands "github.com/OffchainLabs/prysm/v7/cmd/validator/accounts"
@@ -95,6 +96,7 @@ var appFlags = []cli.Flag{
	cmd.MinimalConfigFlag,
	cmd.E2EConfigFlag,
	cmd.VerbosityFlag,
	cmd.LogVModuleFlag,
	cmd.DataDirFlag,
	cmd.ClearDB,
	cmd.ForceClearDB,
@@ -150,13 +152,22 @@ func main() {
			return err
		}

		// determine log verbosity
		// determine default log verbosity
		verbosity := ctx.String(cmd.VerbosityFlag.Name)
		verbosityLevel, err := logrus.ParseLevel(verbosity)
		if err != nil {
			return errors.Wrap(err, "failed to parse log verbosity")
		}
		logs.SetLoggingLevel(verbosityLevel)

		// determine per package verbosity. if not set, maxLevel will be 0.
		vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
		vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
		if err != nil {
			return errors.Wrap(err, "failed to parse log vmodule")
		}

		// set the global logging level to allow for the highest verbosity requested
		logs.SetLoggingLevel(max(maxLevel, verbosityLevel))

		logFileName := ctx.String(cmd.LogFileName.Name)

@@ -172,11 +183,13 @@ func main() {
			formatter.FullTimestamp = true
			formatter.ForceFormatting = true
			formatter.ForceColors = true
			formatter.VModule = vmodule
			formatter.BaseVerbosity = verbosityLevel

			logrus.AddHook(&logs.WriterHook{
				Formatter:     formatter,
				Writer:        os.Stderr,
				AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
				AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
			})
		case "fluentd":
			f := joonix.NewFormatter()
@@ -197,7 +210,7 @@ func main() {
		}

		if logFileName != "" {
			if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
			if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
				log.WithError(err).Error("Failed to configuring logging to disk.")
			}
		}

@@ -76,6 +76,7 @@ var appHelpFlagGroups = []flagGroup{
			cmd.AcceptTosFlag,
			cmd.ApiTimeoutFlag,
			flags.DisableEphemeralLogFile,
			cmd.LogVModuleFlag,
		},
	},
	{

@@ -50,7 +50,4 @@ const (
	// Introduced in Fulu network upgrade.
	NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
	CellsPerBlob    = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.

	// Introduced in Gloas network upgrade.
	PTCSize = 512 // PTCSize is the size of the payload timeliness committee.
)

@@ -50,7 +50,4 @@ const (
	// Introduced in Fulu network upgrade.
	NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
	CellsPerBlob    = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.

	// Introduced in Gloas network upgrade.
	PTCSize = 2 // PTCSize is the size of the payload timeliness committee.
)

@@ -142,7 +142,6 @@ type BeaconChainConfig struct {
	DomainApplicationBuilder   [4]byte `yaml:"DOMAIN_APPLICATION_BUILDER" spec:"true"` // DomainApplicationBuilder defines the BLS signature domain for application builder.
	DomainBLSToExecutionChange [4]byte `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE" spec:"true"` // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
	DomainBeaconBuilder        [4]byte `yaml:"DOMAIN_BEACON_BUILDER" spec:"true"` // DomainBeaconBuilder defines the BLS signature domain for beacon block builder.
	DomainPTCAttester          [4]byte `yaml:"DOMAIN_PTC_ATTESTER" spec:"true"` // DomainPTCAttester defines the BLS signature domain for payload transaction committee attester.

	// Prysm constants.
	GenesisValidatorsRoot [32]byte // GenesisValidatorsRoot is the root hash of the genesis validators.

@@ -185,7 +185,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	DomainApplicationBuilder:   bytesutil.Uint32ToBytes4(0x00000001),
	DomainBLSToExecutionChange: bytesutil.Uint32ToBytes4(0x0A000000),
	DomainBeaconBuilder:        bytesutil.Uint32ToBytes4(0x0B000000),
	DomainPTCAttester:          bytesutil.Uint32ToBytes4(0x0C000000),

	// Prysm constants.
	GenesisValidatorsRoot: [32]byte{75, 54, 61, 185, 78, 40, 97, 32, 215, 110, 185, 5, 52, 15, 221, 78, 84, 191, 233, 240, 107, 243, 63, 246, 207, 90, 210, 127, 81, 27, 254, 149},

@@ -2,15 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "indexed_payload_attestation.go",
        "types.go",
    ],
    srcs = ["types.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/consensus-types",
    visibility = ["//visibility:public"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],

@@ -1,36 +0,0 @@
package consensus_types

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

type IndexedPayloadAttestation struct {
	AttestingIndices []primitives.ValidatorIndex
	Data             *eth.PayloadAttestationData
	Signature        []byte
}

// GetAttestingIndices returns the attesting indices or nil when the receiver is nil.
func (x *IndexedPayloadAttestation) GetAttestingIndices() []primitives.ValidatorIndex {
	if x == nil {
		return nil
	}
	return x.AttestingIndices
}

// GetData returns the attestation data or nil when the receiver is nil.
func (x *IndexedPayloadAttestation) GetData() *eth.PayloadAttestationData {
	if x == nil {
		return nil
	}
	return x.Data
}

// GetSignature returns the signature bytes or nil when the receiver is nil.
func (x *IndexedPayloadAttestation) GetSignature() []byte {
	if x == nil {
		return nil
	}
	return x.Signature
}
@@ -8,12 +8,13 @@ cd "$ROOT_DIR"
./hack/gen-logs.sh

# Fail if that changed anything
if ! git diff --quiet -- ./; then
if ! git diff --quiet -- ./ || [[ -n "$(git ls-files --others --exclude-standard -- ./)" ]]; then
  echo "ERROR: log.go files are out of date. Please run:"
  echo "  ./hack/gen-logs.sh"
  echo "and commit the changes."
  echo
  git diff --stat -- ./ || true
  git status --porcelain -- ./ || true
  exit 1
fi

@@ -30,7 +30,7 @@ func addLogWriter(w io.Writer) {
}

// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
	logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
	if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
		return err
@@ -47,6 +47,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
		return nil
	}

	maxVmoduleLevel := logrus.PanicLevel
	for _, level := range vmodule {
		if level > maxVmoduleLevel {
			maxVmoduleLevel = level
		}
	}

	// Create formatter and writer hook
	formatter := new(prefixed.TextFormatter)
	formatter.TimestampFormat = "2006-01-02 15:04:05.00"
@@ -54,11 +61,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
	// If persistent log files are written - we disable the log messages coloring because
	// the colors are ANSI codes and seen as gibberish in the log files.
	formatter.DisableColors = true
	formatter.BaseVerbosity = lvl
	formatter.VModule = vmodule

	logrus.AddHook(&WriterHook{
		Formatter:     formatter,
		Writer:        f,
		AllowedLevels: logrus.AllLevels[:lvl+1],
		AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
	})

	logrus.Info("File logging initialized")

@@ -28,20 +28,20 @@ func TestMaskCredentialsLogging(t *testing.T) {
	}
}

func TestConfigurePersistantLogging(t *testing.T) {
func TestConfigurePersistentLogging(t *testing.T) {
	testParentDir := t.TempDir()

	// 1. Test creation of file in an existing parent directory
	logFileName := "test.log"
	existingDirectory := "test-1-existing-testing-dir"

	err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
	err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	require.NoError(t, err)

	// 2. Test creation of file along with parent directory
	nonExistingDirectory := "test-2-non-existing-testing-dir"

	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	require.NoError(t, err)

	// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
@@ -52,7 +52,7 @@ func TestConfigurePersistantLogging(t *testing.T) {
		return
	}

	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
	err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
	require.NoError(t, err)

	//4. Create log file in a directory without 700 permissions

@@ -120,6 +120,13 @@ type TextFormatter struct {

	// Timestamp format to use for display when a full timestamp is printed.
	TimestampFormat string

	// VModule overrides the allowed log level for exact package paths.
	// When using VModule, you should also set BaseVerbosity to the default verbosity level provided by the user.
	VModule map[string]logrus.Level

	// BaseVerbosity is the default verbosity level for all packages.
	BaseVerbosity logrus.Level
}

func getCompiledColor(main string, fallback string) func(string) string {
@@ -168,6 +175,11 @@ func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	// check for vmodule compatibility
	if !f.shouldLog(entry) {
		return []byte{}, nil
	}

	var b *bytes.Buffer
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
@@ -236,6 +248,39 @@ func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	return b.Bytes(), nil
}

func (f *TextFormatter) shouldLog(entry *logrus.Entry) bool {
	if len(f.VModule) == 0 {
		return true
	}
	packagePath, ok := entry.Data["package"]
	if !ok {
		return entry.Level <= f.BaseVerbosity
	}
	packageName, ok := packagePath.(string)
	if !ok {
		return entry.Level <= f.BaseVerbosity
	}

	packageLevel := f.bestMatchLevel(packageName)

	return entry.Level <= packageLevel
}

// bestMatchLevel returns the level of the most specific path that matches package name.
func (f *TextFormatter) bestMatchLevel(pkg string) logrus.Level {
	bestLen := 0
	bestLevel := f.BaseVerbosity
	for k, v := range f.VModule {
		if k == pkg || strings.HasPrefix(pkg, k+"/") {
			if len(k) > bestLen {
				bestLen = len(k)
				bestLevel = v
			}
		}
	}
	return bestLevel
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) (err error) {
	var levelColor func(string) string
	var levelText string

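A small illustration of how the formatter resolves a level for a nested package path (package names and levels here are made up): the longest key in VModule that matches the package, either exactly or as a path prefix, wins; otherwise BaseVerbosity applies.

	f := &TextFormatter{
		BaseVerbosity: logrus.InfoLevel,
		VModule: map[string]logrus.Level{
			"beacon-chain/p2p":       logrus.DebugLevel,
			"beacon-chain/p2p/peers": logrus.TraceLevel,
		},
	}
	// Matches both keys; the more specific "beacon-chain/p2p/peers" applies.
	lvl := f.bestMatchLevel("beacon-chain/p2p/peers/scorers") // logrus.TraceLevel
	// No key matches, so the base verbosity applies.
	base := f.bestMatchLevel("beacon-chain/sync") // logrus.InfoLevel
	_, _ = lvl, base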
@@ -4230,7 +4230,7 @@
    sources:
      - file: beacon-chain/core/electra/deposits.go
        search: func IsValidDepositSignature(
      - file: beacon-chain/core/blocks/deposit.go
      - file: beacon-chain/core/helpers/deposit.go
        search: func IsValidDepositSignature(
    spec: |
      <spec fn="is_valid_deposit_signature" fork="electra" hash="7347b754">

@@ -202,7 +202,6 @@ go_test(
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__epoch_processing__process_builder_pending_payments_test.go",
        "gloas__operations__execution_payload_header_test.go",
        "gloas__operations__payload_attestation_test.go",
        "gloas__operations__proposer_slashing_test.go",
        "gloas__sanity__slots_test.go",
        "gloas__ssz_static__ssz_static_test.go",

@@ -1,11 +0,0 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_PayloadAttestation(t *testing.T) {
	operations.RunPayloadAttestationTest(t, "mainnet")
}
@@ -208,7 +208,6 @@ go_test(
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__epoch_processing__process_builder_pending_payments_test.go",
        "gloas__operations__execution_payload_bid_test.go",
        "gloas__operations__payload_attestation_test.go",
        "gloas__operations__proposer_slashing_test.go",
        "gloas__sanity__slots_test.go",
        "gloas__ssz_static__ssz_static_test.go",

@@ -1,11 +0,0 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_PayloadAttestation(t *testing.T) {
	operations.RunPayloadAttestationTest(t, "minimal")
}
@@ -13,7 +13,6 @@ go_library(
        "deposit_request.go",
        "execution_payload.go",
        "execution_payload_bid.go",
        "payload_attestation.go",
        "proposer_slashing.go",
        "slashing.go",
        "sync_aggregate.go",

@@ -1,122 +0,0 @@
package operations

import (
	"context"
	"path"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	"github.com/golang/snappy"
)

type PayloadAttestationOperation func(ctx context.Context, s state.BeaconState, att *eth.PayloadAttestation) (state.BeaconState, error)

func RunPayloadAttestationTest(t *testing.T, config string, fork string, sszToState SSZToState) {
	runPayloadAttestationTest(t, config, fork, "payload_attestation", sszToState, func(ctx context.Context, s state.BeaconState, att *eth.PayloadAttestation) (state.BeaconState, error) {
		// Create a mock block body with the payload attestation
		body, err := createMockBlockBodyWithPayloadAttestation(att)
		if err != nil {
			return s, err
		}
		// Wrap the protobuf body in the interface
		wrappedBody, err := blocks.NewBeaconBlockBody(body)
		if err != nil {
			return s, err
		}
		err = gloas.ProcessPayloadAttestations(ctx, s, wrappedBody)
		return s, err
	})
}

func runPayloadAttestationTest(t *testing.T, config string, fork string, objName string, sszToState SSZToState, operationFn PayloadAttestationOperation) {
	require.NoError(t, utils.SetConfig(t, config))
	testFolders, testsFolderPath := utils.TestFolders(t, config, fork, "operations/"+objName+"/pyspec_tests")
	if len(testFolders) == 0 {
		t.Fatalf("No test folders found for %s/%s/%s", config, fork, "operations/"+objName+"/pyspec_tests")
	}
	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			helpers.ClearCache()
			folderPath := path.Join(testsFolderPath, folder.Name())

			// Load payload attestation from payload_attestation.ssz_snappy
			attestationFile, err := util.BazelFileBytes(folderPath, "payload_attestation.ssz_snappy")
			require.NoError(t, err)
			attestationSSZ, err := snappy.Decode(nil /* dst */, attestationFile)
			require.NoError(t, err, "Failed to decompress payload attestation")

			// Unmarshal payload attestation
			att := &eth.PayloadAttestation{}
			err = att.UnmarshalSSZ(attestationSSZ)
			require.NoError(t, err, "Failed to unmarshal payload attestation")

			runPayloadAttestationOperationTest(t, folderPath, att, sszToState, operationFn)
		})
	}
}

// runPayloadAttestationOperationTest runs a single payload attestation operation test
func runPayloadAttestationOperationTest(t *testing.T, folderPath string, att *eth.PayloadAttestation, sszToState SSZToState, operationFn PayloadAttestationOperation) {
	preBeaconStateFile, err := util.BazelFileBytes(folderPath, "pre.ssz_snappy")
	require.NoError(t, err)
	preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
	require.NoError(t, err, "Failed to decompress")
	beaconState, err := sszToState(preBeaconStateSSZ)
	require.NoError(t, err)

	// Check if post state exists
	postStateExists := true
	postBeaconStateFile, err := util.BazelFileBytes(folderPath, "post.ssz_snappy")
	if err != nil {
		postStateExists = false
	}

	ctx := t.Context()
	resultState, err := operationFn(ctx, beaconState, att)

	if postStateExists {
		// Test should succeed
		require.NoError(t, err, "Operation should succeed")

		// Compare with expected post state
		postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
		require.NoError(t, err, "Failed to decompress post state")
		expectedState, err := sszToState(postBeaconStateSSZ)
		require.NoError(t, err)

		expectedRoot, err := expectedState.HashTreeRoot(ctx)
		require.NoError(t, err)
		resultRoot, err := resultState.HashTreeRoot(ctx)
		require.NoError(t, err)
		require.DeepEqual(t, expectedRoot, resultRoot, "Post state does not match expected")
	} else {
		// Test should fail (no post.ssz_snappy means the operation should error)
		require.NotNil(t, err, "Operation should fail but succeeded")
	}
}

// createMockBlockBodyWithPayloadAttestation creates a mock block body containing the payload attestation
func createMockBlockBodyWithPayloadAttestation(att *eth.PayloadAttestation) (*eth.BeaconBlockBodyGloas, error) {
	body := &eth.BeaconBlockBodyGloas{
		PayloadAttestations: []*eth.PayloadAttestation{att},
		// Default values
		RandaoReveal:          make([]byte, 96),
		Eth1Data:              &eth.Eth1Data{},
		Graffiti:              make([]byte, 32),
		ProposerSlashings:     []*eth.ProposerSlashing{},
		AttesterSlashings:     []*eth.AttesterSlashingElectra{},
		Attestations:          []*eth.AttestationElectra{},
		Deposits:              []*eth.Deposit{},
		VoluntaryExits:        []*eth.SignedVoluntaryExit{},
		SyncAggregate:         &eth.SyncAggregate{},
		BlsToExecutionChanges: []*eth.SignedBLSToExecutionChange{},
	}
	return body, nil
}
@@ -6,7 +6,6 @@ go_library(
    srcs = [
        "execution_payload_bid.go",
        "helpers.go",
        "payload_attestation.go",
        "proposer_slashing.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",

@@ -15,6 +15,7 @@ func sszToState(b []byte) (state.BeaconState, error) {
	}
	return state_native.InitializeFromProtoGloas(base)
}

func sszToBlock(b []byte) (interfaces.SignedBeaconBlock, error) {
	base := &ethpb.BeaconBlockGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {

@@ -1,12 +0,0 @@
package operations

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/runtime/version"
	common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
)

func RunPayloadAttestationTest(t *testing.T, config string) {
	common.RunPayloadAttestationTest(t, config, version.String(version.Gloas), sszToState)
}
@@ -7,7 +7,6 @@ import (

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -75,7 +74,7 @@ func processPreGenesisDeposits(
	if err != nil {
		return nil, errors.Wrap(err, "could not process deposit")
	}
	beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
	beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
	if err != nil {
		return nil, err
	}

@@ -112,7 +112,7 @@ func NewBeaconState(options ...NewBeaconStateOption) (state.BeaconState, error)
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateAltair creates a beacon state with minimum marshalable fields.
@@ -167,7 +167,7 @@ func NewBeaconStateAltair(options ...func(state *ethpb.BeaconStateAltair) error)
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateBellatrix creates a beacon state with minimum marshalable fields.
@@ -234,7 +234,7 @@ func NewBeaconStateBellatrix(options ...func(state *ethpb.BeaconStateBellatrix)
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateCapella creates a beacon state with minimum marshalable fields.
@@ -302,7 +302,7 @@ func NewBeaconStateCapella(options ...func(state *ethpb.BeaconStateCapella) erro
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateDeneb creates a beacon state with minimum marshalable fields.
@@ -370,7 +370,7 @@ func NewBeaconStateDeneb(options ...func(state *ethpb.BeaconStateDeneb) error) (
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateElectra creates a beacon state with minimum marshalable fields.
@@ -438,7 +438,7 @@ func NewBeaconStateElectra(options ...func(state *ethpb.BeaconStateElectra) erro
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateFulu creates a beacon state with minimum marshalable fields.
@@ -507,7 +507,86 @@ func NewBeaconStateFulu(options ...func(state *ethpb.BeaconStateFulu) error) (st
		return nil, err
	}

	return st.Copy(), nil
	return st, nil
}

// NewBeaconStateGloas creates a beacon state with minimum marshalable fields.
func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (state.BeaconState, error) {
	pubkeys := make([][]byte, 512)
	for i := range pubkeys {
		pubkeys[i] = make([]byte, 48)
	}

	builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
	for i := range builderPendingPayments {
		builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		}
	}

	seed := &ethpb.BeaconStateGloas{
		BlockRoots:                 filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
		StateRoots:                 filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
		Slashings:                  make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		RandaoMixes:                filledByteSlice2D(uint64(params.BeaconConfig().EpochsPerHistoricalVector), 32),
		Validators:                 make([]*ethpb.Validator, 0),
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		Eth1Data: &ethpb.Eth1Data{
			DepositRoot: make([]byte, fieldparams.RootLength),
			BlockHash:   make([]byte, 32),
		},
		Fork: &ethpb.Fork{
			PreviousVersion: make([]byte, 4),
			CurrentVersion:  make([]byte, 4),
		},
		Eth1DataVotes:               make([]*ethpb.Eth1Data, 0),
		HistoricalRoots:             make([][]byte, 0),
		JustificationBits:           bitfield.Bitvector4{0x0},
		FinalizedCheckpoint:         &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		LatestBlockHeader:           HydrateBeaconHeader(&ethpb.BeaconBlockHeader{}),
		PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
		PreviousEpochParticipation:  make([]byte, 0),
		CurrentEpochParticipation:   make([]byte, 0),
		CurrentSyncCommittee: &ethpb.SyncCommittee{
			Pubkeys:         pubkeys,
			AggregatePubkey: make([]byte, 48),
		},
		NextSyncCommittee: &ethpb.SyncCommittee{
			Pubkeys:         pubkeys,
			AggregatePubkey: make([]byte, 48),
		},
		ProposerLookahead: make([]uint64, 64),
		LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
			ParentBlockHash:        make([]byte, 32),
			ParentBlockRoot:        make([]byte, 32),
			BlockHash:              make([]byte, 32),
			PrevRandao:             make([]byte, 32),
			FeeRecipient:           make([]byte, 20),
			BlobKzgCommitmentsRoot: make([]byte, 32),
		},
		Builders:                     make([]*ethpb.Builder, 0),
		ExecutionPayloadAvailability: make([]byte, 1024),
		BuilderPendingPayments:       builderPendingPayments,
		BuilderPendingWithdrawals:    make([]*ethpb.BuilderPendingWithdrawal, 0),
		LatestBlockHash:              make([]byte, 32),
		PayloadExpectedWithdrawals:   make([]*enginev1.Withdrawal, 0),
	}

	for _, opt := range options {
		err := opt(seed)
		if err != nil {
			return nil, err
		}
	}

	var st, err = state_native.InitializeFromProtoUnsafeGloas(seed)
	if err != nil {
		return nil, err
	}

	return st, nil
}

// SSZ will fill 2D byte slices with their respective values, so we must fill these in too for round

@@ -68,6 +68,26 @@ func TestNewBeaconStateElectra(t *testing.T) {
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateFulu(t *testing.T) {
	st, err := NewBeaconStateFulu()
	require.NoError(t, err)
	b, err := st.MarshalSSZ()
	require.NoError(t, err)
	got := &ethpb.BeaconStateFulu{}
	require.NoError(t, got.UnmarshalSSZ(b))
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateGloas(t *testing.T) {
	st, err := NewBeaconStateGloas()
	require.NoError(t, err)
	b, err := st.MarshalSSZ()
	require.NoError(t, err)
	got := &ethpb.BeaconStateGloas{}
	require.NoError(t, got.UnmarshalSSZ(b))
	assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconState_HashTreeRoot(t *testing.T) {
	st, err := NewBeaconState()
	require.NoError(t, err)

@@ -71,7 +71,7 @@ func main() {
	flag.Parse()

	if *logFileName != "" {
		if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel); err != nil {
		if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel, map[string]logrus.Level{}); err != nil {
			log.WithError(err).Error("Failed to configuring logging to disk.")
		}
	}