Mirror of https://github.com/OffchainLabs/prysm.git
Synced 2026-01-11 06:18:05 -05:00

Compare commits
24 commits: debugE2EFa ... changed_he
| Author | SHA1 | Date |
|---|---|---|
| | 181890e85c | |
| | 74790e9986 | |
| | 52af63f25a | |
| | 2dad245bc8 | |
| | 9a9990605c | |
| | 2cddb5ca86 | |
| | 73ce28c356 | |
| | 7a294e861e | |
| | 258123341e | |
| | 224b136737 | |
| | 3ed4866eec | |
| | 373c853d17 | |
| | 23b0718b5f | |
| | 3a9854145c | |
| | 1b70d2b566 | |
| | 59b310a221 | |
| | 22b6d1751d | |
| | 9c13d47f4c | |
| | 835dce5f6e | |
| | c4c28e4825 | |
| | c996109b3a | |
| | e397f8a2bd | |
| | 6438060733 | |
| | a2892b1ed5 | |
@@ -80,7 +80,6 @@ linters:
- thelper
- unparam
- varnamelen
- wastedassign
- wrapcheck
- wsl
@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release
## Adding / updating dependencies

1. Add your dependency as you would with go modules. I.e. `go get ...`
1. Run `gazelle update-repos -from_file=go.mod` to update the bazel managed dependencies.
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.

Example:
@@ -165,7 +165,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}

// BlobKzgCommitments --
@@ -249,7 +249,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}

// BlobKzgCommitments --
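Both Header() hunks swap blocks.PayloadValueToGwei for blocks.PayloadValueToWei, so the builder bid value stays denominated in Wei rather than being truncated to Gwei. The comment about endianness refers to the execution layer returning the value as little-endian bytes. The sketch below shows roughly what such a conversion involves; littleEndianToWei is a hypothetical stand-in for illustration, not the actual Prysm helper or its signature.

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndianToWei interprets a little-endian byte slice, as the execution
// layer encodes the payload value, as an unsigned integer in Wei. This is an
// assumed sketch of the behavior expected from a helper such as
// blocks.PayloadValueToWei; the real signature may differ.
func littleEndianToWei(value []byte) *big.Int {
	// Reverse into big-endian order so big.Int can parse it directly.
	be := make([]byte, len(value))
	for i, b := range value {
		be[len(value)-1-i] = b
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// 1 Gwei = 1,000,000,000 Wei (0x3B9ACA00), encoded little-endian in 32 bytes.
	v := make([]byte, 32)
	v[0], v[1], v[2], v[3] = 0x00, 0xCA, 0x9A, 0x3B
	fmt.Println(littleEndianToWei(v)) // 1000000000
}
```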
@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -357,7 +358,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
@@ -394,7 +395,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
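These hunks show why the literal 0 becomes big.NewInt(0) throughout this compare: the wrapped-payload constructors now take the payload value as a *big.Int in Wei rather than a uint64 in Gwei. A small, self-contained illustration of the two units follows; it is not Prysm code, just arithmetic over the assumed 1e9 conversion factor.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 1 Gwei = 1e9 Wei. A builder bid of 42 Gwei expressed in both units:
	weiPerGwei := big.NewInt(1_000_000_000)
	valueWei := new(big.Int).Mul(big.NewInt(42), weiPerGwei)
	valueGwei := new(big.Int).Div(valueWei, weiPerGwei)
	fmt.Println(valueWei, valueGwei) // 42000000000 42

	// A zero bid is zero in either unit, hence the plain big.NewInt(0) above.
	fmt.Println(big.NewInt(0))
}
```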
@@ -1,3 +1,7 @@
package api

const WebUrlPrefix = "/v2/validator/"
const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
)
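The api package grows from a single prefix constant to a small const block. The snippet below is a hypothetical illustration of how such prefixes are typically joined with a route; the "accounts" and "keystores" routes are made up for the example and are not taken from the Prysm codebase.

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

const (
	WebUrlPrefix        = "/v2/validator/"
	WebApiUrlPrefix     = "/api/v2/validator/"
	KeymanagerApiPrefix = "/eth/v1"
)

func main() {
	// Join a prefix with a route; path.Join normalizes the slashes.
	u := url.URL{Path: path.Join(WebApiUrlPrefix, "accounts")}
	fmt.Println(u.String())                         // /api/v2/validator/accounts
	fmt.Println(KeymanagerApiPrefix + "/keystores") // /eth/v1/keystores
}
```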
@@ -559,15 +559,20 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {

// inRegularSync applies the following heuristics to decide if the node is in
// regular sync mode vs init sync mode using only forkchoice.
// It checks that the highest received block is behind the current time by at least 2 epochs
// and that it was imported at least one epoch late if both of these
// tests pass then the node is in init sync. The caller of this function MUST
// have a lock on forkchoice
// The caller of this function MUST have a lock on forkchoice
func (s *Service) inRegularSync() bool {
currentSlot := s.CurrentSlot()
fc := s.cfg.ForkChoiceStore
if currentSlot-fc.HighestReceivedBlockSlot() < 2*params.BeaconConfig().SlotsPerEpoch {
highestSlot := fc.HighestReceivedBlockSlot()
// if the highest received slot is from the same epoch, we are in regular sync
if slots.ToEpoch(currentSlot) == slots.ToEpoch(highestSlot) {
return true
}
return fc.HighestReceivedBlockDelay() < params.BeaconConfig().SlotsPerEpoch
// If the highest received block is less than 2 blocks away we are in regular sync
if currentSlot-highestSlot < primitives.Slot(2) {
return true
}
// At this stage the last block received is from the previous epoch and more than 2 blocks ago.
// If the highest slot was received during its slot or the next one then we are in regular sync
return fc.HighestReceivedBlockDelay() < primitives.Slot(2)
}
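The rewritten inRegularSync replaces the single two-epoch window with three checks: same epoch as the newest imported block, newest block fewer than 2 slots old, or newest block imported within 2 slots of its own slot. A reduced sketch of that decision, with slots, epochs, and delays as plain integers, is shown below; slotsPerEpoch and the helper are stand-ins for illustration, not the Prysm API.

```go
package main

import "fmt"

// inRegularSync restates the new heuristic over plain integers.
func inRegularSync(currentSlot, highestReceivedSlot, highestReceivedDelay, slotsPerEpoch uint64) bool {
	epoch := func(slot uint64) uint64 { return slot / slotsPerEpoch }
	// The newest imported block is from the current epoch: regular sync.
	if epoch(currentSlot) == epoch(highestReceivedSlot) {
		return true
	}
	// The newest imported block is less than 2 slots old: regular sync.
	if currentSlot-highestReceivedSlot < 2 {
		return true
	}
	// Otherwise it is from an earlier epoch and more than 2 slots old; count it
	// as regular sync only if it was received within 2 slots of its own slot.
	return highestReceivedDelay < 2
}

func main() {
	const slotsPerEpoch = 32
	fmt.Println(inRegularSync(100, 98, 5, slotsPerEpoch))  // true: same epoch
	fmt.Println(inRegularSync(128, 95, 1, slotsPerEpoch))  // true: earlier epoch but arrived on time
	fmt.Println(inRegularSync(128, 60, 40, slotsPerEpoch)) // false: old block, imported late
}
```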
@@ -609,7 +609,7 @@ func TestService_inRegularSync(t *testing.T) {
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, c.inRegularSync())

c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-5*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-4*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
require.Equal(t, true, c.inRegularSync())

c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
@@ -182,6 +182,10 @@ var (
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
})
dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "da_waited_time_milliseconds",
Help: "Total time spent waiting for a data availability check in ReceiveBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "process_attestations_milliseconds",
@@ -325,7 +325,10 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// The proposer indices cache takes the target root for the previous
// epoch as key
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e-1)
if e > 0 {
e = e - 1
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
return nil
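The new `if e > 0` guard matters because epochs are unsigned: subtracting one from epoch 0 would wrap around rather than go negative. The short program below demonstrates the underflow with a local Epoch type assumed to mirror Prysm's primitives.Epoch (an unsigned integer type).

```go
package main

import "fmt"

// Epoch is assumed to mirror primitives.Epoch, an unsigned integer type.
type Epoch uint64

func main() {
	// Why the hunk above guards with `if e > 0` before subtracting one:
	// an unsigned epoch underflows instead of going negative.
	e := Epoch(0)
	fmt.Println(e - 1) // 18446744073709551615, not -1

	// The guarded form leaves epoch 0 alone and otherwise uses e-1.
	if e > 0 {
		e = e - 1
	}
	fmt.Println(e) // 0
}
```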
@@ -911,7 +911,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state older than Bellatrix, nil payload",
stateVersion: 1,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state older than Bellatrix, empty payload",
@@ -940,7 +939,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, nil payload",
stateVersion: 2,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state is Bellatrix, empty payload",
@@ -122,6 +122,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))

// Defragment the state before continuing block processing.
s.defragmentState(postState)
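This hunk feeds the da_waited_time_milliseconds summary declared in the metrics hunk above: the data-availability wait is timed and recorded in milliseconds. A minimal, self-contained sketch of the same pattern with the Prometheus client library follows; the sleep is a stand-in for the data-availability check, and the snippet is illustrative rather than a copy of the Prysm service code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// dataAvailWaitedTime mirrors the summary added in the metrics hunk above.
var dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "da_waited_time_milliseconds",
	Help: "Total time spent waiting for a data availability check in ReceiveBlock()",
})

func main() {
	daStartTime := time.Now()
	time.Sleep(5 * time.Millisecond) // stand-in for the data availability check
	daWaitedTime := time.Since(daStartTime)
	dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
	fmt.Println("observed", daWaitedTime)
}
```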
@@ -2,6 +2,7 @@ package testing

import (
"context"
"math/big"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
@@ -54,13 +55,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
beacon-chain/cache/proposer_indices.go (vendored), 1 line changed
@@ -115,6 +115,7 @@ func (p *ProposerIndicesCache) IndicesFromCheckpoint(c forkchoicetypes.Checkpoin
root, ok := p.rootMap[c]
p.Unlock()
if !ok {
ProposerIndicesCacheMiss.Inc()
return emptyIndices, ok
}
return p.ProposerIndices(c.Epoch+1, root)
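The lookup above resolves a checkpoint for epoch e to the proposer indices cached for epoch e+1 under the mapped root, and the hunk adds a cache-miss counter bump when the checkpoint is unknown. The sketch below is a simplified model of that two-step lookup; the key and value types are illustrative only, not the real cache, which stores fixed-size [SlotsPerEpoch]ValidatorIndex arrays and forkchoice checkpoint types.

```go
package main

import "fmt"

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type cache struct {
	indices map[uint64]map[[32]byte][]uint64 // epoch -> block root -> proposer indices
	rootMap map[checkpoint][32]byte          // checkpoint -> root under which the next epoch was cached
}

// indicesFromCheckpoint mirrors the lookup in the diff: a checkpoint for epoch e
// resolves to the proposer indices cached for epoch e+1 under the mapped root.
func (c *cache) indicesFromCheckpoint(cp checkpoint) ([]uint64, bool) {
	root, ok := c.rootMap[cp]
	if !ok {
		return nil, false // cache miss; the real code bumps a miss counter here
	}
	byRoot, ok := c.indices[cp.Epoch+1]
	if !ok {
		return nil, false
	}
	ind, ok := byRoot[root]
	return ind, ok
}

func main() {
	c := &cache{
		indices: map[uint64]map[[32]byte][]uint64{2: {{'a'}: {7, 3, 9}}},
		rootMap: map[checkpoint][32]byte{{Epoch: 1, Root: [32]byte{'b'}}: {'a'}},
	}
	fmt.Println(c.indicesFromCheckpoint(checkpoint{Epoch: 1, Root: [32]byte{'b'}})) // [7 3 9] true
}
```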
beacon-chain/cache/proposer_indices_test.go (vendored), 33 lines changed
@@ -37,70 +37,69 @@ func TestProposerCache_Set(t *testing.T) {
|
||||
func TestProposerCache_CheckpointAndPrune(t *testing.T) {
|
||||
cache := NewProposerIndicesCache()
|
||||
indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
|
||||
root := [32]byte{'a'}
|
||||
cpRoot := [32]byte{'b'}
|
||||
copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
|
||||
for i := 1; i < 10; i++ {
|
||||
root := [32]byte{byte(i)}
|
||||
cache.Set(primitives.Epoch(i), root, indices)
|
||||
cpRoot := [32]byte{byte(i - 1)}
|
||||
cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
|
||||
}
|
||||
received, ok := cache.ProposerIndices(1, root)
|
||||
received, ok := cache.ProposerIndices(1, [32]byte{1})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.ProposerIndices(4, root)
|
||||
received, ok = cache.ProposerIndices(4, [32]byte{4})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.ProposerIndices(9, root)
|
||||
received, ok = cache.ProposerIndices(9, [32]byte{9})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
cache.Prune(5)
|
||||
|
||||
emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
|
||||
received, ok = cache.ProposerIndices(1, root)
|
||||
received, ok = cache.ProposerIndices(1, [32]byte{1})
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, emptyIndices, received)
|
||||
|
||||
received, ok = cache.ProposerIndices(4, root)
|
||||
received, ok = cache.ProposerIndices(4, [32]byte{4})
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, emptyIndices, received)
|
||||
|
||||
received, ok = cache.ProposerIndices(9, root)
|
||||
received, ok = cache.ProposerIndices(9, [32]byte{9})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{0}})
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, emptyIndices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, emptyIndices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
|
||||
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, indices, received)
|
||||
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
payload.PrevRandao = random
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, wrapped)
|
||||
require.NoError(t, err)
|
||||
@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
}, 0)
|
||||
}, big.NewInt(0))
|
||||
}
|
||||
|
||||
func emptyPayload() *enginev1.ExecutionPayload {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
@@ -642,7 +643,10 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
|
||||
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
|
||||
big.NewInt(0),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
@@ -1060,7 +1064,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
st, err := prepareValidators(spb, test.Args)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
|
||||
@@ -50,7 +50,6 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"beacon_committee_test.go",
|
||||
"block_test.go",
|
||||
"main_test.go",
|
||||
"randao_test.go",
|
||||
"rewards_penalties_test.go",
|
||||
"shuffle_test.go",
|
||||
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
|
||||
func TestAttestation_IsAggregator(t *testing.T) {
|
||||
t.Run("aggregator", func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -30,6 +32,8 @@ func TestAttestation_IsAggregator(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("not aggregator", func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 2048)
|
||||
@@ -44,6 +48,8 @@ func TestAttestation_IsAggregator(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create 10 committees
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
@@ -204,6 +210,8 @@ func Test_ValidateAttestationTime(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
err := helpers.ValidateAttestationTime(tt.args.attSlot, tt.args.genesisTime,
|
||||
params.BeaconConfig().MaximumGossipClockDisparityDuration())
|
||||
if tt.wantedErr != "" {
|
||||
@@ -216,6 +224,8 @@ func Test_ValidateAttestationTime(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Genesis was 6 epochs ago exactly.
|
||||
offset := params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot * 6)
|
||||
genesis := time.Now().Add(-1 * time.Second * time.Duration(offset))
|
||||
@@ -285,6 +295,8 @@ func TestValidateNilAttestation(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, helpers.ValidateNilAttestation(tt.attestation))
|
||||
} else {
|
||||
@@ -326,6 +338,8 @@ func TestValidateSlotTargetEpoch(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, helpers.ValidateSlotTargetEpoch(tt.attestation.Data))
|
||||
} else {
|
||||
|
||||
@@ -379,7 +379,7 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
|
||||
if cp.Epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
|
||||
return nil
|
||||
}
|
||||
slot, err := slots.EpochEnd(cp.Epoch - 1)
|
||||
slot, err := slots.EpochEnd(cp.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -21,6 +21,8 @@ import (
|
||||
)
|
||||
|
||||
func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
// Create 10 committees
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
@@ -71,6 +73,8 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestComputeCommittee_RegressionTest(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
|
||||
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
|
||||
index := uint64(215)
|
||||
@@ -80,6 +84,8 @@ func TestComputeCommittee_RegressionTest(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyBitfieldLength_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
bf := bitfield.Bitlist{0xFF, 0x01}
|
||||
committeeSize := uint64(8)
|
||||
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
|
||||
@@ -91,7 +97,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
epoch := primitives.Epoch(1)
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 0, // Epoch 0.
|
||||
@@ -103,7 +109,7 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
|
||||
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
var activationEpoch primitives.Epoch
|
||||
@@ -190,10 +196,10 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
for i, tt := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
cac := validatorIndexToCommittee[tt.index]
|
||||
@@ -209,6 +215,8 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -239,6 +247,8 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -259,7 +269,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
|
||||
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -380,9 +390,9 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
for i, tt := range tests {
|
||||
ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(tt.stateSlot))
|
||||
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
|
||||
if tt.verificationFailure {
|
||||
@@ -395,7 +405,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
|
||||
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
indices := make([]primitives.ValidatorIndex, validatorCount)
|
||||
@@ -425,7 +435,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
|
||||
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
indices := make([]primitives.ValidatorIndex, validatorCount)
|
||||
|
||||
@@ -60,6 +60,8 @@ func TestBlockRootAtSlot_CorrectBlockRoot(t *testing.T) {
|
||||
}
|
||||
for i, tt := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
s.Slot = tt.stateSlot
|
||||
state, err := state_native.InitializeFromProtoPhase0(s)
|
||||
require.NoError(t, err)
|
||||
@@ -110,6 +112,8 @@ func TestBlockRootAtSlot_OutOfBounds(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
helpers.ClearCache()
|
||||
|
||||
state.Slot = tt.stateSlot
|
||||
s, err := state_native.InitializeFromProtoPhase0(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,13 +0,0 @@
package helpers

import (
"os"
"testing"
)

// run ClearCache before each test to prevent cross-test side effects
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}
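With this package-level TestMain removed, the surrounding hunks move the cache reset into each test and subtest, so no test depends on another test's cache contents. A sketch of that pattern is shown below; the test names are hypothetical and the helpers import path is assumed from the other hunks in this compare.

```go
package helpers_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
)

// TestSomething clears shared caches itself instead of relying on TestMain.
func TestSomething(t *testing.T) {
	helpers.ClearCache()
	// ... test body using committee / proposer caches ...
}

// TestSomethingWithSubtests clears the caches once per subtest, as the diffs above do.
func TestSomethingWithSubtests(t *testing.T) {
	for _, tt := range []string{"case a", "case b"} {
		t.Run(tt, func(t *testing.T) {
			helpers.ClearCache()
			// ... subtest body ...
		})
	}
}
```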
|
||||
@@ -40,6 +40,8 @@ func TestRandaoMix_OK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
|
||||
mix, err := RandaoMix(state, test.epoch)
|
||||
require.NoError(t, err)
|
||||
@@ -74,6 +76,8 @@ func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
|
||||
mix, err := RandaoMix(state, test.epoch)
|
||||
require.NoError(t, err)
|
||||
@@ -88,6 +92,8 @@ func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGenerateSeed_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
intInBytes := make([]byte, 32)
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
)
|
||||
|
||||
func TestTotalBalance_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
|
||||
{EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9},
|
||||
@@ -27,6 +29,8 @@ func TestTotalBalance_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: []*ethpb.Validator{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -47,6 +51,8 @@ func TestGetBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{0, 0, 0}},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Balances: test.b})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance")
|
||||
@@ -62,6 +68,8 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
validators = append(validators, ðpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
|
||||
@@ -75,8 +83,6 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
tests := []struct {
|
||||
vCount int
|
||||
}{
|
||||
@@ -85,6 +91,8 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
validators = append(validators, ðpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
|
||||
@@ -98,8 +106,6 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
tests := []struct {
|
||||
vCount int
|
||||
wantCount int
|
||||
@@ -109,6 +115,8 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
{vCount: 10000, wantCount: 10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
validators = append(validators, ðpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
|
||||
@@ -133,6 +141,8 @@ func TestIncreaseBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
|
||||
@@ -157,6 +167,8 @@ func TestDecreaseBalance_OK(t *testing.T) {
|
||||
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}},
|
||||
@@ -169,6 +181,8 @@ func TestDecreaseBalance_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFinalityDelay(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
|
||||
base.FinalizedCheckpoint = ðpb.Checkpoint{Epoch: 3}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
@@ -199,6 +213,8 @@ func TestFinalityDelay(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsInInactivityLeak(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
|
||||
base.FinalizedCheckpoint = ðpb.Checkpoint{Epoch: 3}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
@@ -269,6 +285,8 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
|
||||
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
)
|
||||
|
||||
func TestShuffleList_InvalidValidatorCount(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
maxShuffleListSize = 20
|
||||
list := make([]primitives.ValidatorIndex, 21)
|
||||
if _, err := ShuffleList(list, [32]byte{123, 125}); err == nil {
|
||||
@@ -23,6 +25,8 @@ func TestShuffleList_InvalidValidatorCount(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestShuffleList_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
var list1 []primitives.ValidatorIndex
|
||||
seed1 := [32]byte{1, 128, 12}
|
||||
seed2 := [32]byte{2, 128, 12}
|
||||
@@ -47,6 +51,8 @@ func TestShuffleList_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSplitIndices_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
var l []uint64
|
||||
numValidators := uint64(64000)
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
@@ -61,6 +67,8 @@ func TestSplitIndices_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
var list []primitives.ValidatorIndex
|
||||
listSize := uint64(1000)
|
||||
seed := [32]byte{123, 42}
|
||||
@@ -125,6 +133,8 @@ func BenchmarkShuffleList(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestShuffledIndex(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
var list []primitives.ValidatorIndex
|
||||
listSize := uint64(399)
|
||||
for i := primitives.ValidatorIndex(0); uint64(i) < listSize; i++ {
|
||||
@@ -147,6 +157,8 @@ func TestShuffledIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSplitIndicesAndOffset_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
var l []uint64
|
||||
validators := uint64(64000)
|
||||
for i := uint64(0); i < validators; i++ {
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -49,7 +49,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -77,7 +77,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -105,7 +105,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -135,6 +135,8 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -161,6 +163,8 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -188,7 +192,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -219,7 +223,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -260,7 +264,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -288,7 +292,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -318,6 +322,8 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -345,7 +351,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
@@ -372,6 +378,8 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
})
|
||||
@@ -388,6 +396,8 @@ func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
|
||||
@@ -399,7 +409,7 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
|
||||
@@ -179,8 +179,6 @@ func TestIsSlashableValidator_OK(t *testing.T) {
|
||||
|
||||
func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
c := params.BeaconConfig()
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
@@ -224,9 +222,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(tt.slot))
|
||||
result, err := BeaconProposerIndex(context.Background(), state)
|
||||
require.NoError(t, err, "Failed to get shard and committees at slot")
|
||||
@@ -235,9 +233,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ClearCache()
|
||||
defer ClearCache()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig()
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
@@ -268,6 +266,8 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -309,12 +309,16 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
epoch := primitives.Epoch(9999)
|
||||
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
|
||||
}
|
||||
|
||||
func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
c := 1000
|
||||
validators := make([]*ethpb.Validator, c)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -348,7 +352,6 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
{validatorCount: 1000000, wantedChurn: 15 /* validatorCount/churnLimitQuotient */},
|
||||
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
@@ -382,9 +385,6 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
@@ -417,7 +417,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
// Test basic functionality of ActiveValidatorIndices without caching. This test will need to be
|
||||
// rewritten when releasing some cache flag.
|
||||
func TestActiveValidatorIndices(t *testing.T) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
//farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
type args struct {
|
||||
state *ethpb.BeaconState
|
||||
epoch primitives.Epoch
|
||||
@@ -428,7 +428,7 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
want []primitives.ValidatorIndex
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
/*{
|
||||
name: "all_active_epoch_10",
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
@@ -559,7 +559,7 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
epoch: 10,
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},
|
||||
},*/
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
@@ -569,22 +569,21 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "no active validator indices",
|
||||
wantedErr: "state has nil validator slice",
|
||||
},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators(tt.args.state.Validators))
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
}
|
||||
assert.DeepEqual(t, tt.want, got, "ActiveValidatorIndices()")
|
||||
ClearCache()
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -685,6 +684,8 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
bState := ðpb.BeaconState{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
|
||||
require.NoError(t, err)
|
||||
@@ -717,6 +718,8 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
|
||||
})
|
||||
}
|
||||
@@ -744,6 +747,8 @@ func TestIsIsEligibleForActivation(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
|
||||
@@ -782,6 +787,8 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
}
|
||||
|
||||
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -805,6 +812,8 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
ClearCache()
|
||||
|
||||
e := primitives.Epoch(2)
|
||||
r := [32]byte{'a'}
|
||||
root := [32]byte{'b'}
|
||||
|
||||
@@ -48,6 +48,7 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
|
||||
t.Run(fmt.Sprintf("valCount: %d, avgBalance: %d", tt.valCount, tt.avgBalance), func(t *testing.T) {
|
||||
// Reset committee cache - as we need to recalculate active validator set for each test.
|
||||
helpers.ClearCache()
|
||||
|
||||
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance)
|
||||
@@ -177,6 +178,8 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
sr, _, e := tt.genWsCheckpoint()
|
||||
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig())
|
||||
if tt.wantedErr != "" {
|
||||
@@ -247,6 +250,8 @@ func TestWeakSubjectivity_ParseWeakSubjectivityInputString(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
wsCheckpt, err := helpers.ParseWeakSubjectivityInputString(tt.input)
|
||||
if tt.wantedErr != "" {
|
||||
require.ErrorContains(t, tt.wantedErr, err)
|
||||
@@ -283,6 +288,8 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
|
||||
}
|
||||
|
||||
func TestMinEpochsForBlockRequests(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
params.SetActiveTestCleanup(t, params.MainnetConfig())
|
||||
var expected primitives.Epoch = 33024
|
||||
// expected value of 33024 via spec commentary:
|
||||
|
||||
@@ -893,6 +893,7 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot primitives.Slot)
//
// 3.) state with current finalized root
// 4.) unfinalized States
// 5.) not origin root
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB. CleanUpDirtyStates")
defer span.End()
@@ -907,6 +908,11 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
}
deletedRoots := make([][32]byte, 0)

oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
return err
}

err = s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
return bkt.ForEach(func(k, v []byte) error {
@@ -914,15 +920,31 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
return ctx.Err()
}

finalizedChkpt := bytesutil.ToBytes32(f.Root) == bytesutil.ToBytes32(v)
root := bytesutil.ToBytes32(v)
slot := bytesutil.BytesToSlotBigEndian(k)
mod := slot % slotsPerArchivedPoint
nonFinalized := slot > finalizedSlot

// The following conditions cover 1, 2, 3 and 4 above.
if mod != 0 && mod <= slotsPerArchivedPoint-slotsPerArchivedPoint/3 && !finalizedChkpt && !nonFinalized {
deletedRoots = append(deletedRoots, bytesutil.ToBytes32(v))
if mod == 0 {
return nil
}

if mod > slotsPerArchivedPoint-slotsPerArchivedPoint/3 {
return nil
}

if bytesutil.ToBytes32(f.Root) == root {
return nil
}

if slot > finalizedSlot {
return nil
}

if oRoot == root {
return nil
}

deletedRoots = append(deletedRoots, root)
return nil
})
})
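The rewrite splits the single compound condition into early returns, one per reason to keep a state (archived point, close to the next archived point, finalized root, unfinalized, origin root). The sketch below restates those checks as one predicate over plain values; it is for illustration only, since the real code works on bolt bucket keys, 32-byte roots, and primitives.Slot values.

```go
package main

import "fmt"

// shouldDelete restates the early-return checks from CleanUpDirtyStates.
func shouldDelete(slot, finalizedSlot, slotsPerArchivedPoint uint64, isFinalizedRoot, isOriginRoot bool) bool {
	mod := slot % slotsPerArchivedPoint
	switch {
	case mod == 0: // exactly on an archived point: keep
		return false
	case mod > slotsPerArchivedPoint-slotsPerArchivedPoint/3: // close to the next archived point: keep
		return false
	case isFinalizedRoot: // state with the current finalized root: keep
		return false
	case slot > finalizedSlot: // unfinalized state: keep
		return false
	case isOriginRoot: // origin checkpoint root: keep
		return false
	default:
		return true
	}
}

func main() {
	fmt.Println(shouldDelete(65, 1000, 128, false, false))  // true: mid-archive, finalized, nothing special
	fmt.Println(shouldDelete(128, 1000, 128, false, false)) // false: archived point
}
```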
|
||||
|
||||
@@ -3,6 +3,7 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
@@ -99,7 +100,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}, 0)
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
@@ -124,7 +125,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}, 0)
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
@@ -675,6 +676,7 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
|
||||
genesisRoot := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
|
||||
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
|
||||
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
|
||||
|
||||
bRoots := make([][32]byte, 0)
|
||||
slotsPerArchivedPoint := primitives.Slot(128)
|
||||
@@ -720,6 +722,7 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
|
||||
genesisRoot := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
|
||||
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
|
||||
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
|
||||
|
||||
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
b := util.NewBeaconBlock()
|
||||
@@ -741,6 +744,35 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
|
||||
require.Equal(t, true, db.HasState(context.Background(), genesisRoot))
|
||||
}
|
||||
|
||||
func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
genesisState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), r))
|
||||
require.NoError(t, db.SaveState(context.Background(), genesisState, r))
|
||||
|
||||
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(context.Background(), wsb))
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(i))
|
||||
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
}
|
||||
|
||||
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), r))
|
||||
require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
|
||||
}
|
||||
|
||||
func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -749,6 +781,7 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
|
||||
genesisRoot := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
|
||||
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
|
||||
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
|
||||
|
||||
var unfinalizedRoots [][32]byte
|
||||
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
|
||||
@@ -7,10 +7,21 @@ package slasherkv
|
||||
// it easy to scan for keys that have a certain shard number as a prefix and return those
|
||||
// corresponding attestations.
|
||||
var (
|
||||
// Slasher buckets.
|
||||
attestedEpochsByValidator = []byte("attested-epochs-by-validator")
|
||||
attestationRecordsBucket = []byte("attestation-records")
|
||||
|
||||
// key: (encoded) ValidatorIndex
|
||||
// value: (encoded) Epoch
|
||||
attestedEpochsByValidator = []byte("attested-epochs-by-validator")
|
||||
|
||||
// key: attestation SigningRoot
|
||||
// value: (encoded + compressed) IndexedAttestation
|
||||
attestationRecordsBucket = []byte("attestation-records")
|
||||
|
||||
// key: (encoded) Target Epoch + (encoded) ValidatorIndex
|
||||
// value: attestation SigningRoot
|
||||
attestationDataRootsBucket = []byte("attestation-data-roots")
|
||||
proposalRecordsBucket = []byte("proposal-records")
|
||||
slasherChunksBucket = []byte("slasher-chunks")
|
||||
|
||||
// key: Slot+ValidatorIndex
|
||||
// value: (encoded) SignedBlockHeaderWrapper
|
||||
proposalRecordsBucket = []byte("proposal-records")
|
||||
slasherChunksBucket = []byte("slasher-chunks")
|
||||
)
|
||||
|
||||
@@ -29,72 +29,90 @@ const (
// LastEpochWrittenForValidators given a list of validator indices returns the latest
// epoch we have recorded the validators writing data for.
func (s *Store) LastEpochWrittenForValidators(
ctx context.Context, validatorIndices []primitives.ValidatorIndex,
ctx context.Context, validatorIndexes []primitives.ValidatorIndex,
) ([]*slashertypes.AttestedEpochForValidator, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
defer span.End()

attestedEpochs := make([]*slashertypes.AttestedEpochForValidator, 0)
encodedIndices := make([][]byte, len(validatorIndices))
for i, valIdx := range validatorIndices {
encodedIndices[i] = encodeValidatorIndex(valIdx)
encodedIndexes := make([][]byte, len(validatorIndexes))

for i, validatorIndex := range validatorIndexes {
encodedIndexes[i] = encodeValidatorIndex(validatorIndex)
}

err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(attestedEpochsByValidator)
for i, encodedIndex := range encodedIndices {

for i, encodedIndex := range encodedIndexes {
var epoch primitives.Epoch

epochBytes := bkt.Get(encodedIndex)
if epochBytes != nil {
if err := epoch.UnmarshalSSZ(epochBytes); err != nil {
return err
}
}

attestedEpochs = append(attestedEpochs, &slashertypes.AttestedEpochForValidator{
ValidatorIndex: validatorIndices[i],
ValidatorIndex: validatorIndexes[i],
Epoch: epoch,
})
}

return nil
})

return attestedEpochs, err
}

// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
encodedIndices := make([][]byte, 0, len(epochByValidator))
encodedEpochs := make([][]byte, 0, len(epochByValidator))
for valIdx, epoch := range epochByValidator {

const batchSize = 10000

encodedIndexes := make([][]byte, 0, len(epochByValIndex))
encodedEpochs := make([][]byte, 0, len(epochByValIndex))

for valIndex, epoch := range epochByValIndex {
if ctx.Err() != nil {
return ctx.Err()
}

encodedEpoch, err := epoch.MarshalSSZ()
if err != nil {
return err
}
encodedIndices = append(encodedIndices, encodeValidatorIndex(valIdx))

encodedIndexes = append(encodedIndexes, encodeValidatorIndex(valIndex))
encodedEpochs = append(encodedEpochs, encodedEpoch)
}

// The list of validators might be too massive for boltdb to handle in a single transaction,
// so instead we split it into batches and write each batch.
batchSize := 10000
for i := 0; i < len(encodedIndices); i += batchSize {
for i := 0; i < len(encodedIndexes); i += batchSize {
if ctx.Err() != nil {
return ctx.Err()
}

if err := s.db.Update(func(tx *bolt.Tx) error {
if ctx.Err() != nil {
return ctx.Err()
}

bkt := tx.Bucket(attestedEpochsByValidator)
min := i + batchSize
if min > len(encodedIndices) {
min = len(encodedIndices)

minimum := i + batchSize
if minimum > len(encodedIndexes) {
minimum = len(encodedIndexes)
}
for j, encodedIndex := range encodedIndices[i:min] {

for j, encodedIndex := range encodedIndexes[i:minimum] {
if ctx.Err() != nil {
return ctx.Err()
}
@@ -102,79 +120,106 @@ func (s *Store) SaveLastEpochsWrittenForValidators(
return err
}
}

return nil
}); err != nil {
return err
}
}

return nil
}

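SaveLastEpochsWrittenForValidators above writes the encoded pairs in windows of batchSize = 10000, one bolt transaction per window, so that an arbitrarily large validator set never lands in a single transaction. A hedged sketch of that pattern in isolation; commit stands in for one s.db.Update call, and only the standard context package is assumed.

import "context"

// applyInBatches walks items in windows of batchSize and commits each window
// separately, mirroring the transaction-per-batch pattern above. Sketch only:
// commit stands in for a single bolt Update transaction over one batch.
func applyInBatches(ctx context.Context, items [][]byte, batchSize int, commit func(batch [][]byte) error) error {
    for i := 0; i < len(items); i += batchSize {
        if err := ctx.Err(); err != nil {
            return err
        }
        end := i + batchSize
        if end > len(items) {
            end = len(items)
        }
        if err := commit(items[i:end]); err != nil {
            return err
        }
    }
    return nil
}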
// CheckAttesterDoubleVotes retries any slashable double votes that exist
|
||||
// for a series of input attestations.
|
||||
// CheckAttesterDoubleVotes retrieves any slashable double votes that exist
|
||||
// for a series of input attestations with respect to the database.
|
||||
func (s *Store) CheckAttesterDoubleVotes(
|
||||
ctx context.Context, attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
) ([]*slashertypes.AttesterDoubleVote, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.CheckAttesterDoubleVotes")
|
||||
defer span.End()
|
||||
|
||||
doubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
|
||||
doubleVotesMu := sync.Mutex{}
|
||||
mu := sync.Mutex{}
|
||||
eg, egctx := errgroup.WithContext(ctx)
|
||||
for _, att := range attestations {
|
||||
|
||||
for _, attestation := range attestations {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
attToProcess := att
|
||||
// process every attestation parallelly.
|
||||
attToProcess := attestation
|
||||
|
||||
// Process each attestation in parallel.
|
||||
eg.Go(func() error {
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
|
||||
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
|
||||
|
||||
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
|
||||
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
|
||||
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
|
||||
|
||||
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
|
||||
// Check if there is signing root in the database for this combination
|
||||
// of validator index and target epoch.
|
||||
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
|
||||
validatorEpochKey := append(encEpoch, encIdx...)
|
||||
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
|
||||
|
||||
// An attestation record key is comprised of a signing root (32 bytes).
|
||||
if len(attRecordsKey) < attestationRecordKeySize {
|
||||
// If there is no signing root for this combination,
|
||||
// then there is no double vote. We can continue to the next validator.
|
||||
continue
|
||||
}
|
||||
|
||||
// Retrieve the attestation record corresponding to the signing root
|
||||
// from the database.
|
||||
encExistingAttRecord := attRecordsBkt.Get(attRecordsKey)
|
||||
if encExistingAttRecord == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
existingSigningRoot := bytesutil.ToBytes32(attRecordsKey[:signingRootSize])
|
||||
if existingSigningRoot != attToProcess.SigningRoot {
|
||||
existingAttRecord, err := decodeAttestationRecord(encExistingAttRecord)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
slashAtt := &slashertypes.AttesterDoubleVote{
|
||||
ValidatorIndex: primitives.ValidatorIndex(valIdx),
|
||||
Target: attToProcess.IndexedAttestation.Data.Target.Epoch,
|
||||
PrevAttestationWrapper: existingAttRecord,
|
||||
AttestationWrapper: attToProcess,
|
||||
}
|
||||
localDoubleVotes = append(localDoubleVotes, slashAtt)
|
||||
if existingSigningRoot == attToProcess.SigningRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
// There is a double vote.
|
||||
existingAttRecord, err := decodeAttestationRecord(encExistingAttRecord)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build the proof of double vote.
|
||||
slashAtt := &slashertypes.AttesterDoubleVote{
|
||||
ValidatorIndex: primitives.ValidatorIndex(valIdx),
|
||||
Target: attToProcess.IndexedAttestation.Data.Target.Epoch,
|
||||
PrevAttestationWrapper: existingAttRecord,
|
||||
AttestationWrapper: attToProcess,
|
||||
}
|
||||
|
||||
localDoubleVotes = append(localDoubleVotes, slashAtt)
|
||||
}
|
||||
// if any routine is cancelled, then cancel this routine too
|
||||
|
||||
// If any routine is cancelled, then cancel this routine too.
|
||||
select {
|
||||
case <-egctx.Done():
|
||||
return egctx.Err()
|
||||
default:
|
||||
}
|
||||
// if there are any doible votes in this attestation, add it to the global double votes
|
||||
|
||||
// If there are any double votes in this attestation, add it to the global double votes.
|
||||
if len(localDoubleVotes) > 0 {
|
||||
doubleVotesMu.Lock()
|
||||
defer doubleVotesMu.Unlock()
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
doubleVotes = append(doubleVotes, localDoubleVotes...)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
return doubleVotes, eg.Wait()
|
||||
}
|
||||
|
||||
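Underneath the bolt plumbing in CheckAttesterDoubleVotes, the slashing condition is simple: for one validator index and one target epoch, two attestations with different signing roots are a double vote. A small sketch of that core check; lookup is a hypothetical stand-in for the attestation-data-roots bucket read.

// doubleVote models the core rule applied above: an existing record is looked up
// by (target epoch, validator index); if one exists and its signing root differs
// from the incoming attestation's signing root, the pair is slashable.
func doubleVote(lookup func(targetEpoch, valIdx uint64) ([32]byte, bool), targetEpoch, valIdx uint64, incomingRoot [32]byte) bool {
    existingRoot, ok := lookup(targetEpoch, valIdx)
    return ok && existingRoot != incomingRoot
}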
@@ -211,6 +256,8 @@ func (s *Store) AttestationRecordForValidator(
|
||||
}
|
||||
|
||||
// SaveAttestationRecordsForValidators saves attestation records for the specified indices.
|
||||
// If multiple attestations are provided for the same validator index + target epoch combination,
|
||||
// then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
|
||||
func (s *Store) SaveAttestationRecordsForValidators(
|
||||
ctx context.Context,
|
||||
attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
@@ -219,37 +266,40 @@ func (s *Store) SaveAttestationRecordsForValidators(
|
||||
defer span.End()
|
||||
encodedTargetEpoch := make([][]byte, len(attestations))
|
||||
encodedRecords := make([][]byte, len(attestations))
|
||||
encodedIndices := make([][]byte, len(attestations))
|
||||
for i, att := range attestations {
|
||||
encEpoch := encodeTargetEpoch(att.IndexedAttestation.Data.Target.Epoch)
|
||||
value, err := encodeAttestationRecord(att)
|
||||
|
||||
for i, attestation := range attestations {
|
||||
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)
|
||||
|
||||
value, err := encodeAttestationRecord(attestation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indicesBytes := make([]byte, len(att.IndexedAttestation.AttestingIndices)*8)
|
||||
for _, idx := range att.IndexedAttestation.AttestingIndices {
|
||||
encodedIdx := encodeValidatorIndex(primitives.ValidatorIndex(idx))
|
||||
indicesBytes = append(indicesBytes, encodedIdx...)
|
||||
}
|
||||
encodedIndices[i] = indicesBytes
|
||||
|
||||
encodedTargetEpoch[i] = encEpoch
|
||||
encodedRecords[i] = value
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
|
||||
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
|
||||
for i, att := range attestations {
|
||||
if err := attRecordsBkt.Put(att.SigningRoot[:], encodedRecords[i]); err != nil {
|
||||
|
||||
for i := len(attestations) - 1; i >= 0; i-- {
|
||||
attestation := attestations[i]
|
||||
|
||||
if err := attRecordsBkt.Put(attestation.SigningRoot[:], encodedRecords[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, valIdx := range att.IndexedAttestation.AttestingIndices {
|
||||
|
||||
for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
|
||||
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
|
||||
|
||||
key := append(encodedTargetEpoch[i], encIdx...)
|
||||
if err := signingRootsBkt.Put(key, att.SigningRoot[:]); err != nil {
|
||||
if err := signingRootsBkt.Put(key, attestation.SigningRoot[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -314,43 +364,60 @@ func (s *Store) SaveSlasherChunks(
|
||||
}
|
||||
|
||||
// CheckDoubleBlockProposals takes in a list of proposals and for each,
|
||||
// checks if there already exists a proposal at the same slot+validatorIndex combination. If so,
|
||||
// We check if the existing signing root is not-empty and is different than the incoming
|
||||
// proposal signing root. If so, we return a double block proposal object.
|
||||
// checks if there already exists a proposal at the same slot+validatorIndex combination.
|
||||
// If so, it checks if the existing signing root is not-empty and is different than
|
||||
// the incoming proposal signing root.
|
||||
// If so, it returns a double block proposal object.
|
||||
func (s *Store) CheckDoubleBlockProposals(
|
||||
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
|
||||
ctx context.Context, incomingProposals []*slashertypes.SignedBlockHeaderWrapper,
|
||||
) ([]*ethpb.ProposerSlashing, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
|
||||
defer span.End()
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposals))
|
||||
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(incomingProposals))
|
||||
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the proposal records bucket
|
||||
bkt := tx.Bucket(proposalRecordsBucket)
|
||||
for _, proposal := range proposals {
|
||||
|
||||
for _, incomingProposal := range incomingProposals {
|
||||
// Build the key corresponding to this slot + validator index combination
|
||||
key, err := keyForValidatorProposal(
|
||||
proposal.SignedBeaconBlockHeader.Header.Slot,
|
||||
proposal.SignedBeaconBlockHeader.Header.ProposerIndex,
|
||||
incomingProposal.SignedBeaconBlockHeader.Header.Slot,
|
||||
incomingProposal.SignedBeaconBlockHeader.Header.ProposerIndex,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Retrieve the existing proposal record from the database
|
||||
encExistingProposalWrapper := bkt.Get(key)
|
||||
|
||||
// If there is no existing proposal record (empty result), then there is no double proposal.
|
||||
// We can continue to the next proposal.
|
||||
if len(encExistingProposalWrapper) < signingRootSize {
|
||||
continue
|
||||
}
|
||||
|
||||
// Compare the proposal signing root in the DB with the incoming proposal signing root.
|
||||
// If they differ, we have a double proposal.
|
||||
existingSigningRoot := bytesutil.ToBytes32(encExistingProposalWrapper[:signingRootSize])
|
||||
if existingSigningRoot != proposal.SigningRoot {
|
||||
if existingSigningRoot != incomingProposal.SigningRoot {
|
||||
existingProposalWrapper, err := decodeProposalRecord(encExistingProposalWrapper)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proposerSlashings = append(proposerSlashings, &ethpb.ProposerSlashing{
|
||||
Header_1: existingProposalWrapper.SignedBeaconBlockHeader,
|
||||
Header_2: proposal.SignedBeaconBlockHeader,
|
||||
Header_2: incomingProposal.SignedBeaconBlockHeader,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return proposerSlashings, err
|
||||
}
|
||||
|
||||
@@ -384,14 +451,20 @@ func (s *Store) BlockProposalForValidator(
|
||||
|
||||
// SaveBlockProposals takes in a list of block proposals and saves them to our
|
||||
// proposal records bucket in the database.
|
||||
// If multiple proposals are provided for the same slot + validatorIndex combination,
|
||||
// then only the last one is saved in the database.
|
||||
func (s *Store) SaveBlockProposals(
|
||||
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
|
||||
defer span.End()
|
||||
|
||||
encodedKeys := make([][]byte, len(proposals))
|
||||
encodedProposals := make([][]byte, len(proposals))
|
||||
|
||||
// Loop over all proposals to encode keys and proposals themselves.
|
||||
for i, proposal := range proposals {
|
||||
// Encode the key for this proposal.
|
||||
key, err := keyForValidatorProposal(
|
||||
proposal.SignedBeaconBlockHeader.Header.Slot,
|
||||
proposal.SignedBeaconBlockHeader.Header.ProposerIndex,
|
||||
@@ -399,20 +472,29 @@ func (s *Store) SaveBlockProposals(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Encode the proposal itself.
|
||||
enc, err := encodeProposalRecord(proposal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
encodedKeys[i] = key
|
||||
encodedProposals[i] = enc
|
||||
}
|
||||
|
||||
// All proposals are saved into the DB in a single transaction.
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the proposal records bucket.
|
||||
bkt := tx.Bucket(proposalRecordsBucket)
|
||||
|
||||
// Save all proposals.
|
||||
for i := range proposals {
|
||||
if err := bkt.Put(encodedKeys[i], encodedProposals[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -472,7 +554,7 @@ func suffixForAttestationRecordsKey(key, encodedValidatorIndex []byte) bool {
return bytes.Equal(encIdx, encodedValidatorIndex)
}

// Disk key for a validator proposal, including a slot+validatorIndex as a byte slice.
// keyForValidatorProposal returns a disk key for a validator proposal, including a slot+validatorIndex as a byte slice.
func keyForValidatorProposal(slot primitives.Slot, proposerIndex primitives.ValidatorIndex) ([]byte, error) {
encSlot, err := slot.MarshalSSZ()
if err != nil {
@@ -512,37 +594,55 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
return chunk, nil
}

// Decode attestation record from bytes.
// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
}

// Encode attestation.
encodedAtt, err := att.IndexedAttestation.MarshalSSZ()
if err != nil {
return nil, err
}

// Compress attestation.
compressedAtt := snappy.Encode(nil, encodedAtt)

return append(att.SigningRoot[:], compressedAtt...), nil
}

// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < signingRootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want 32, got %d", len(encoded))
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", signingRootSize, len(encoded))
}
signingRoot := encoded[:signingRootSize]
decodedAtt := &ethpb.IndexedAttestation{}

// Decompress attestation.
decodedAttBytes, err := snappy.Decode(nil, encoded[signingRootSize:])
if err != nil {
return nil, err
}

// Decode attestation.
decodedAtt := &ethpb.IndexedAttestation{}
if err := decodedAtt.UnmarshalSSZ(decodedAttBytes); err != nil {
return nil, err
}
return &slashertypes.IndexedAttestationWrapper{

// Decode signing root.
signingRootBytes := encoded[:signingRootSize]
signingRoot := bytesutil.ToBytes32(signingRootBytes)

// Return decoded attestation.
attestation := &slashertypes.IndexedAttestationWrapper{
IndexedAttestation: decodedAtt,
SigningRoot: bytesutil.ToBytes32(signingRoot),
}, nil
SigningRoot: signingRoot,
}

return attestation, nil
}

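Both functions above agree on one record layout: a 32-byte signing root followed by the snappy-compressed SSZ bytes of the attestation. A minimal round-trip sketch over raw bytes; the real code marshals an IndexedAttestation, here the payload is arbitrary.

import (
    "errors"

    "github.com/golang/snappy"
)

// encodeRecord prepends the 32-byte signing root to the snappy-compressed payload.
func encodeRecord(signingRoot [32]byte, payload []byte) []byte {
    return append(signingRoot[:], snappy.Encode(nil, payload)...)
}

// decodeRecord splits the signing root back out and decompresses the payload.
func decodeRecord(encoded []byte) ([32]byte, []byte, error) {
    var root [32]byte
    if len(encoded) < len(root) {
        return root, nil, errors.New("encoded record shorter than a signing root")
    }
    copy(root[:], encoded[:len(root)])
    payload, err := snappy.Decode(nil, encoded[len(root):])
    if err != nil {
        return root, nil, err
    }
    return root, payload, nil
}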
func encodeProposalRecord(blkHdr *slashertypes.SignedBlockHeaderWrapper) ([]byte, error) {
|
||||
|
||||
@@ -113,7 +113,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime

numOfBlocks := uint64(0)
var numOfBlocks uint64
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we can't find an acceptable block after

@@ -260,7 +260,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToWei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -273,7 +273,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToWei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -734,7 +734,7 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -763,7 +763,7 @@ func fullPayloadFromExecutionBlock(
Withdrawals: block.Withdrawals,
BlobGasUsed: bgu,
ExcessBlobGas: ebg,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
@@ -811,7 +811,7 @@ func fullPayloadFromPayloadBody(
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -840,7 +840,7 @@ func fullPayloadFromPayloadBody(
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}

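The recurring change in these hunks is to carry the execution payload value as a Wei-denominated *big.Int (big.NewInt(0) when the value is unknown) instead of truncating it to a Gwei uint64 up front. A hedged sketch of such a conversion, assuming the raw value arrives as little-endian bytes; the tree's actual PayloadValueToWei helper may handle byte order differently.

import "math/big"

// weiFromLittleEndian interprets little-endian value bytes as a Wei amount.
// The byte order is an assumption of this sketch only.
func weiFromLittleEndian(val []byte) *big.Int {
    be := make([]byte, len(val))
    for i, b := range val {
        be[len(val)-1-i] = b
    }
    return new(big.Int).SetBytes(be) // big.Int.SetBytes expects big-endian
}

// gweiFromWei truncates a Wei amount down to Gwei (1 Gwei = 1e9 Wei).
func gweiFromWei(wei *big.Int) uint64 {
    return new(big.Int).Div(wei, big.NewInt(1_000_000_000)).Uint64()
}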
@@ -127,7 +127,7 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
|
||||
require.NoError(t, err)
|
||||
@@ -476,7 +476,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
|
||||
require.NoError(t, err)
|
||||
@@ -490,7 +490,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV3Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -518,7 +518,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
@@ -532,7 +532,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV3Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
@@ -560,7 +560,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
@@ -574,7 +574,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV3Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
@@ -602,7 +602,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
@@ -616,7 +616,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadV3Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
@@ -1537,7 +1537,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
|
||||
BlockHash: wantedHash[:],
|
||||
Transactions: [][]byte{},
|
||||
}, 0)
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
return p
|
||||
},
|
||||
@@ -1545,7 +1545,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
|
||||
if err != nil {
|
||||
@@ -1598,7 +1598,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
|
||||
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
|
||||
BlockHash: wantedHash[:],
|
||||
Transactions: [][]byte{},
|
||||
}, 0)
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
return p
|
||||
},
|
||||
@@ -1606,7 +1606,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
|
||||
if err != nil {
|
||||
|
||||
@@ -753,7 +753,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
} else {
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
"Run with the --%s flag to resume normal operations.", features.EnableEIP4881.Name)
"Remove the --%s flag to resume normal operations.", features.DisableEIP4881.Name)
}
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
}

@@ -23,7 +23,6 @@ go_library(
|
||||
"//consensus-types/payload-attribute:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
@@ -63,14 +62,14 @@ func (e *EngineClient) ForkchoiceUpdated(
|
||||
// GetPayload --
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
|
||||
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
|
||||
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, big.NewInt(int64(e.BlockValue)))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, e.BlobsBundle, e.BuilderOverride, nil
|
||||
}
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
|
||||
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, big.NewInt(int64(e.BlockValue)))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
|
||||
@@ -559,7 +559,7 @@ func (b *SignedBlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericSignedBeaco
|
||||
Block: bl,
|
||||
Signature: sig,
|
||||
}
|
||||
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
|
||||
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true}, nil
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericBeaconBlock, error) {
|
||||
@@ -567,7 +567,7 @@ func (b *BlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericBeaconBlock, erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
|
||||
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true}, nil
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBellatrix) ToConsensus() (*eth.BlindedBeaconBlockBellatrix, error) {
|
||||
@@ -1016,7 +1016,7 @@ func (b *SignedBlindedBeaconBlockCapella) ToGeneric() (*eth.GenericSignedBeaconB
|
||||
Block: bl,
|
||||
Signature: sig,
|
||||
}
|
||||
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
|
||||
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true}, nil
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockCapella) ToGeneric() (*eth.GenericBeaconBlock, error) {
|
||||
@@ -1024,7 +1024,7 @@ func (b *BlindedBeaconBlockCapella) ToGeneric() (*eth.GenericBeaconBlock, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
|
||||
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true}, nil
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockCapella) ToConsensus() (*eth.BlindedBeaconBlockCapella, error) {
|
||||
|
||||
@@ -31,11 +31,11 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
return false
}
if err != nil {
httputil.HandleError(w, "Could not get block from block ID: %s"+err.Error(), http.StatusInternalServerError)
httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
return false
}
if err = blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find requested block: %s"+err.Error(), http.StatusNotFound)
httputil.HandleError(w, "Could not find requested block: "+err.Error(), http.StatusNotFound)
return false
}
return true

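The two-line fix above removes a stray printf verb: the old messages concatenated err.Error() onto a string that still contained %s, so the literal characters %s appeared in the response. A tiny illustration:

package main

import (
    "errors"
    "fmt"
)

func main() {
    err := errors.New("boom")
    // Old form: the %s verb is never substituted, because no Printf-style call is made.
    fmt.Println("Could not get block from block ID: %s" + err.Error()) // ...: %sboom
    // Fixed form: plain concatenation.
    fmt.Println("Could not get block from block ID: " + err.Error()) // ...: boom
}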
@@ -38,7 +38,7 @@ import (
|
||||
|
||||
// GetAggregateAttestation aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
|
||||
func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestation")
|
||||
_, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestation")
|
||||
defer span.End()
|
||||
|
||||
_, attDataRoot, ok := shared.HexFromQuery(w, r, "attestation_data_root", fieldparams.RootLength, true)
|
||||
@@ -51,53 +51,67 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.AttestationsPool.AggregateUnaggregatedAttestations(ctx); err != nil {
|
||||
httputil.HandleError(w, "Could not aggregate unaggregated attestations: "+err.Error(), http.StatusBadRequest)
|
||||
var match *ethpbalpha.Attestation
|
||||
var err error
|
||||
|
||||
match, err = matchingAtt(s.AttestationsPool.AggregatedAttestations(), primitives.Slot(slot), attDataRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get matching attestation: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
allAtts := s.AttestationsPool.AggregatedAttestations()
|
||||
var bestMatchingAtt *ethpbalpha.Attestation
|
||||
for _, att := range allAtts {
|
||||
if att.Data.Slot == primitives.Slot(slot) {
|
||||
root, err := att.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get attestation data root: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if bytes.Equal(root[:], attDataRoot) {
|
||||
if bestMatchingAtt == nil || len(att.AggregationBits) > len(bestMatchingAtt.AggregationBits) {
|
||||
bestMatchingAtt = att
|
||||
}
|
||||
}
|
||||
if match == nil {
|
||||
atts, err := s.AttestationsPool.UnaggregatedAttestations()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
match, err = matchingAtt(atts, primitives.Slot(slot), attDataRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get matching attestation: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if bestMatchingAtt == nil {
|
||||
if match == nil {
|
||||
httputil.HandleError(w, "No matching attestation found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
response := &AggregateAttestationResponse{
|
||||
Data: &shared.Attestation{
|
||||
AggregationBits: hexutil.Encode(bestMatchingAtt.AggregationBits),
|
||||
AggregationBits: hexutil.Encode(match.AggregationBits),
|
||||
Data: &shared.AttestationData{
|
||||
Slot: strconv.FormatUint(uint64(bestMatchingAtt.Data.Slot), 10),
|
||||
CommitteeIndex: strconv.FormatUint(uint64(bestMatchingAtt.Data.CommitteeIndex), 10),
|
||||
BeaconBlockRoot: hexutil.Encode(bestMatchingAtt.Data.BeaconBlockRoot),
|
||||
Slot: strconv.FormatUint(uint64(match.Data.Slot), 10),
|
||||
CommitteeIndex: strconv.FormatUint(uint64(match.Data.CommitteeIndex), 10),
|
||||
BeaconBlockRoot: hexutil.Encode(match.Data.BeaconBlockRoot),
|
||||
Source: &shared.Checkpoint{
|
||||
Epoch: strconv.FormatUint(uint64(bestMatchingAtt.Data.Source.Epoch), 10),
|
||||
Root: hexutil.Encode(bestMatchingAtt.Data.Source.Root),
|
||||
Epoch: strconv.FormatUint(uint64(match.Data.Source.Epoch), 10),
|
||||
Root: hexutil.Encode(match.Data.Source.Root),
|
||||
},
|
||||
Target: &shared.Checkpoint{
|
||||
Epoch: strconv.FormatUint(uint64(bestMatchingAtt.Data.Target.Epoch), 10),
|
||||
Root: hexutil.Encode(bestMatchingAtt.Data.Target.Root),
|
||||
Epoch: strconv.FormatUint(uint64(match.Data.Target.Epoch), 10),
|
||||
Root: hexutil.Encode(match.Data.Target.Root),
|
||||
},
|
||||
},
|
||||
Signature: hexutil.Encode(bestMatchingAtt.Signature),
|
||||
Signature: hexutil.Encode(match.Signature),
|
||||
}}
|
||||
httputil.WriteJson(w, response)
|
||||
}
|
||||
|
||||
func matchingAtt(atts []*ethpbalpha.Attestation, slot primitives.Slot, attDataRoot []byte) (*ethpbalpha.Attestation, error) {
|
||||
for _, att := range atts {
|
||||
if att.Data.Slot == slot {
|
||||
root, err := att.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation data root")
|
||||
}
|
||||
if bytes.Equal(root[:], attDataRoot) {
|
||||
return att, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// SubmitContributionAndProofs publishes multiple signed sync committee contribution and proofs.
|
||||
func (s *Server) SubmitContributionAndProofs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitContributionAndProofs")
|
||||
|
||||
@@ -223,7 +223,7 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
}
|
||||
|
||||
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
|
||||
w.Header().Set(api.ExecutionPayloadValueHeader, fmt.Sprintf("%d", v1alpha1resp.PayloadValue))
|
||||
w.Header().Set(api.ExecutionPayloadValueHeader, v1alpha1resp.PayloadValue)
|
||||
w.Header().Set(api.ConsensusBlockValueHeader, consensusBlockValue)
|
||||
|
||||
phase0Block, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Phase0)
|
||||
@@ -310,7 +310,7 @@ func handleProducePhase0V3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Phase0,
|
||||
payloadValue uint64,
|
||||
payloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Phase0.MarshalSSZ()
|
||||
@@ -329,8 +329,8 @@ func handleProducePhase0V3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Phase0),
|
||||
ExecutionPayloadBlinded: false,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", payloadValue), // mev not available at this point
|
||||
ConsensusBlockValue: "", // rewards not applicable before altair
|
||||
ExecutionPayloadValue: payloadValue, // mev not available at this point
|
||||
ConsensusBlockValue: "", // rewards not applicable before altair
|
||||
Data: jsonBytes,
|
||||
})
|
||||
}
|
||||
@@ -339,7 +339,7 @@ func handleProduceAltairV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Altair,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -359,7 +359,7 @@ func handleProduceAltairV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Altair),
|
||||
ExecutionPayloadBlinded: false,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
|
||||
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -369,7 +369,7 @@ func handleProduceBellatrixV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Bellatrix,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -394,7 +394,7 @@ func handleProduceBellatrixV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Bellatrix),
|
||||
ExecutionPayloadBlinded: false,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
|
||||
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -404,7 +404,7 @@ func handleProduceBlindedBellatrixV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedBellatrix,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -429,7 +429,7 @@ func handleProduceBlindedBellatrixV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Bellatrix),
|
||||
ExecutionPayloadBlinded: true,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
|
||||
ExecutionPayloadValue: executionPayloadValue,
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -439,7 +439,7 @@ func handleProduceBlindedCapellaV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedCapella,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -464,7 +464,7 @@ func handleProduceBlindedCapellaV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Capella),
|
||||
ExecutionPayloadBlinded: true,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
|
||||
ExecutionPayloadValue: executionPayloadValue,
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -474,7 +474,7 @@ func handleProduceCapellaV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Capella,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -499,7 +499,7 @@ func handleProduceCapellaV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Capella),
|
||||
ExecutionPayloadBlinded: false,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
|
||||
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -509,7 +509,7 @@ func handleProduceBlindedDenebV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedDeneb,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -534,7 +534,7 @@ func handleProduceBlindedDenebV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Deneb),
|
||||
ExecutionPayloadBlinded: true,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
|
||||
ExecutionPayloadValue: executionPayloadValue,
|
||||
ConsensusBlockValue: consensusPayloadValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
@@ -544,7 +544,7 @@ func handleProduceDenebV3(
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Deneb,
|
||||
executionPayloadValue uint64,
|
||||
executionPayloadValue string,
|
||||
consensusBlockValue string,
|
||||
) {
|
||||
if isSSZ {
|
||||
@@ -570,7 +570,7 @@ func handleProduceDenebV3(
|
||||
httputil.WriteJson(w, &ProduceBlockV3Response{
|
||||
Version: version.String(version.Deneb),
|
||||
ExecutionPayloadBlinded: false,
|
||||
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
|
||||
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
|
||||
ConsensusBlockValue: consensusBlockValue,
|
||||
Data: jsonBytes,
|
||||
})
|
||||
|
||||
@@ -64,7 +64,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
|
||||
@@ -84,7 +84,6 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
SkipMevBoost: true,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
server := &Server{
|
||||
@@ -98,7 +97,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
|
||||
@@ -118,7 +117,10 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
SkipMevBoost: true,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -132,7 +134,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
@@ -184,7 +186,10 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
SkipMevBoost: true,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -198,7 +203,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
|
||||
@@ -216,10 +221,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
SkipMevBoost: true,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
return g, err
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -253,7 +255,10 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
SkipMevBoost: true,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
b, err := block.ToUnsigned().ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -267,7 +272,7 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
@@ -566,7 +571,7 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
g.PayloadValue = "2000"
|
||||
return g, err
|
||||
}())
|
||||
server := &Server{
|
||||
@@ -780,7 +785,10 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -794,7 +802,7 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlindedBlock(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
@@ -848,7 +856,7 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
g.PayloadValue = "2000"
|
||||
return g, err
|
||||
}())
|
||||
server := &Server{
|
||||
@@ -915,7 +923,10 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -929,7 +940,7 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlindedBlock(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
@@ -1027,11 +1038,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1063,11 +1074,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1085,7 +1096,10 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1098,11 +1112,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1120,7 +1134,10 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1133,11 +1150,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1155,7 +1172,10 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1168,11 +1188,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1192,7 +1212,7 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
g.PayloadValue = "2000"
|
||||
return g, err
|
||||
}())
|
||||
server := &Server{
|
||||
@@ -1228,7 +1248,10 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
b, err := block.ToUnsigned().ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1241,11 +1264,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1263,7 +1286,10 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1276,11 +1302,11 @@ func TestProduceBlockV3(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV3(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1383,7 +1409,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1421,7 +1447,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1437,7 +1463,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
@@ -1460,7 +1489,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1476,7 +1505,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1498,7 +1530,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1514,7 +1546,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1536,7 +1571,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1554,7 +1589,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
g.PayloadValue = "2000"
|
||||
return g, err
|
||||
}())
|
||||
server := &Server{
|
||||
@@ -1593,7 +1628,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
b, err := block.ToUnsigned().ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1615,7 +1653,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
@@ -1631,7 +1669,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
SkipMevBoost: false,
|
||||
}).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
b, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
b.PayloadValue = "2000"
|
||||
return b, nil
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
@@ -1653,7 +1694,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
|
||||
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
|
||||
})
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
AggregationBits: []byte{0, 1, 1},
|
||||
Data: &ethpbalpha.AttestationData{
|
||||
Slot: 2,
|
||||
CommitteeIndex: 2,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: root21,
|
||||
Source: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
@@ -90,7 +90,7 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
AggregationBits: []byte{0, 1, 1, 1},
|
||||
Data: &ethpbalpha.AttestationData{
|
||||
Slot: 2,
|
||||
CommitteeIndex: 3,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: root22,
|
||||
Source: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
@@ -103,33 +103,56 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
},
|
||||
Signature: sig22,
|
||||
}
|
||||
root33 := bytesutil.PadTo([]byte("root3_3"), 32)
|
||||
sig33 := bls.NewAggregateSignature().Marshal()
|
||||
attslot33 := &ethpbalpha.Attestation{
|
||||
AggregationBits: []byte{1, 0, 0, 1},
|
||||
root31 := bytesutil.PadTo([]byte("root3_1"), 32)
|
||||
sig31 := bls.NewAggregateSignature().Marshal()
|
||||
attslot31 := &ethpbalpha.Attestation{
|
||||
AggregationBits: []byte{1, 0},
|
||||
Data: &ethpbalpha.AttestationData{
|
||||
Slot: 2,
|
||||
CommitteeIndex: 3,
|
||||
BeaconBlockRoot: root33,
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: root31,
|
||||
Source: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root33,
|
||||
Root: root31,
|
||||
},
|
||||
Target: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root33,
|
||||
Root: root31,
|
||||
},
|
||||
},
|
||||
Signature: sig33,
|
||||
Signature: sig31,
|
||||
}
|
||||
root32 := bytesutil.PadTo([]byte("root3_2"), 32)
|
||||
sig32 := bls.NewAggregateSignature().Marshal()
|
||||
attslot32 := &ethpbalpha.Attestation{
|
||||
AggregationBits: []byte{0, 1},
|
||||
Data: &ethpbalpha.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: root32,
|
||||
Source: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root32,
|
||||
},
|
||||
Target: &ethpbalpha.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root32,
|
||||
},
|
||||
},
|
||||
Signature: sig32,
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
err := pool.SaveAggregatedAttestations([]*ethpbalpha.Attestation{attSlot1, attslot21, attslot22})
|
||||
assert.NoError(t, err)
|
||||
err = pool.SaveUnaggregatedAttestations([]*ethpbalpha.Attestation{attslot31, attslot32})
|
||||
assert.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
t.Run("matching aggregated att", func(t *testing.T) {
|
||||
reqRoot, err := attslot22.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
@@ -147,7 +170,7 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
assert.DeepEqual(t, "0x00010101", resp.Data.AggregationBits)
|
||||
assert.DeepEqual(t, hexutil.Encode(sig22), resp.Data.Signature)
|
||||
assert.Equal(t, "2", resp.Data.Data.Slot)
|
||||
assert.Equal(t, "3", resp.Data.Data.CommitteeIndex)
|
||||
assert.Equal(t, "1", resp.Data.Data.CommitteeIndex)
|
||||
assert.DeepEqual(t, hexutil.Encode(root22), resp.Data.Data.BeaconBlockRoot)
|
||||
require.NotNil(t, resp.Data.Data.Source)
|
||||
assert.Equal(t, "1", resp.Data.Data.Source.Epoch)
|
||||
@@ -156,19 +179,11 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
assert.Equal(t, "1", resp.Data.Data.Target.Epoch)
|
||||
assert.DeepEqual(t, hexutil.Encode(root22), resp.Data.Data.Target.Root)
|
||||
})
|
||||
|
||||
t.Run("aggregate beforehand", func(t *testing.T) {
|
||||
err = s.AttestationsPool.SaveUnaggregatedAttestation(attslot33)
|
||||
require.NoError(t, err)
|
||||
newAtt := ethpbalpha.CopyAttestation(attslot33)
|
||||
newAtt.AggregationBits = []byte{0, 1, 0, 1}
|
||||
err = s.AttestationsPool.SaveUnaggregatedAttestation(newAtt)
|
||||
require.NoError(t, err)
|
||||
|
||||
reqRoot, err := attslot33.Data.HashTreeRoot()
|
||||
t.Run("matching unaggregated att", func(t *testing.T) {
|
||||
reqRoot, err := attslot32.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2"
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -178,7 +193,18 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
resp := &AggregateAttestationResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
assert.DeepEqual(t, "0x01010001", resp.Data.AggregationBits)
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.DeepEqual(t, "0x0001", resp.Data.AggregationBits)
|
||||
assert.DeepEqual(t, hexutil.Encode(sig32), resp.Data.Signature)
|
||||
assert.Equal(t, "3", resp.Data.Data.Slot)
|
||||
assert.Equal(t, "1", resp.Data.Data.CommitteeIndex)
|
||||
assert.DeepEqual(t, hexutil.Encode(root32), resp.Data.Data.BeaconBlockRoot)
|
||||
require.NotNil(t, resp.Data.Data.Source)
|
||||
assert.Equal(t, "1", resp.Data.Data.Source.Epoch)
|
||||
assert.DeepEqual(t, hexutil.Encode(root32), resp.Data.Data.Source.Root)
|
||||
require.NotNil(t, resp.Data.Data.Target)
|
||||
assert.Equal(t, "1", resp.Data.Data.Target.Epoch)
|
||||
assert.DeepEqual(t, hexutil.Encode(root32), resp.Data.Data.Target.Root)
|
||||
})
|
||||
t.Run("no matching attestation", func(t *testing.T) {
|
||||
attDataRoot := hexutil.Encode(bytesutil.PadTo([]byte("foo"), 32))
|
||||
|
||||
@@ -76,6 +76,7 @@ go_library(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
@@ -22,7 +23,7 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
|
||||
}
|
||||
|
||||
isBlinded := sBlk.IsBlinded()
|
||||
payloadValue := sBlk.ValueInGwei()
|
||||
payloadValue := sBlk.ValueInWei()
|
||||
|
||||
switch sBlk.Version() {
|
||||
case version.Deneb:
|
||||
@@ -41,30 +42,30 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
|
||||
}
|
||||
|
||||
// Helper functions for constructing blocks for each version
|
||||
func (vs *Server) constructDenebBlock(blockProto proto.Message, isBlinded bool, payloadValue uint64, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
|
||||
func (vs *Server) constructDenebBlock(blockProto proto.Message, isBlinded bool, payloadValue math.Wei, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blockProto.(*ethpb.BlindedBeaconBlockDeneb)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blockProto.(*ethpb.BlindedBeaconBlockDeneb)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
|
||||
}
|
||||
denebContents := &ethpb.BeaconBlockContentsDeneb{Block: blockProto.(*ethpb.BeaconBlockDeneb)}
|
||||
if bundle != nil {
|
||||
denebContents.KzgProofs = bundle.Proofs
|
||||
denebContents.Blobs = bundle.Blobs
|
||||
}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: denebContents}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: denebContents}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
|
||||
}
|
||||
|
||||
func (vs *Server) constructCapellaBlock(pb proto.Message, isBlinded bool, payloadValue uint64) *ethpb.GenericBeaconBlock {
|
||||
func (vs *Server) constructCapellaBlock(pb proto.Message, isBlinded bool, payloadValue math.Wei) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
|
||||
}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
|
||||
}
|
||||
|
||||
func (vs *Server) constructBellatrixBlock(pb proto.Message, isBlinded bool, payloadValue uint64) *ethpb.GenericBeaconBlock {
|
||||
func (vs *Server) constructBellatrixBlock(pb proto.Message, isBlinded bool, payloadValue math.Wei) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
|
||||
}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
|
||||
}
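
The shift visible in this hunk is from a Gwei-denominated uint64 to a Wei-denominated big integer (math.Wei), serialized onto the generic block as a decimal string via String(). As a rough, standalone illustration of the unit change only (gweiToWeiString is a hypothetical helper, not a Prysm API), a Gwei amount widens into Wei like this:

package blockexample

import "math/big"

// gweiToWeiString converts a Gwei amount to its Wei value rendered as a
// decimal string: 1 Gwei = 10^9 Wei, so 2 Gwei becomes "2000000000".
func gweiToWeiString(gwei uint64) string {
	wei := new(big.Int).SetUint64(gwei)
	wei.Mul(wei, big.NewInt(1_000_000_000))
	return wei.String()
}

This matches the test adjustments elsewhere in this comparison, where mocked block values move from single-digit Gwei figures to the same figures multiplied by 1e9.
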
|
||||
|
||||
func (vs *Server) constructAltairBlock(pb proto.Message) *ethpb.GenericBeaconBlock {
|
||||
|
||||
@@ -46,7 +46,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
capellaTransitionState, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&v1.ExecutionPayloadHeaderCapella{BlockNumber: 1}, 0)
|
||||
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&v1.ExecutionPayloadHeaderCapella{BlockNumber: 1}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, capellaTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderCapella))
|
||||
b2pbCapella := util.NewBeaconBlockCapella()
|
||||
@@ -59,7 +59,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(context.Background(), []primitives.ValidatorIndex{0}, []common.Address{{}}))
|
||||
|
||||
denebTransitionState, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
wrappedHeaderDeneb, err := blocks.WrappedExecutionPayloadHeaderDeneb(&v1.ExecutionPayloadHeaderDeneb{BlockNumber: 2}, 0)
|
||||
wrappedHeaderDeneb, err := blocks.WrappedExecutionPayloadHeaderDeneb(&v1.ExecutionPayloadHeaderDeneb{BlockNumber: 2}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, denebTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderDeneb))
|
||||
b2pbDeneb := util.NewBeaconBlockDeneb()
|
||||
@@ -356,7 +356,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
t.Run("Builder configured. Local block has higher value", func(t *testing.T) {
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2}
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2 * 1e9}
|
||||
b := blk.Block()
|
||||
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
@@ -377,7 +377,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1}
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1 * 1e9}
|
||||
b := blk.Block()
|
||||
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
@@ -748,7 +748,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
require.DeepEqual(t, want, h)
|
||||
}
|
||||
if tc.returnedHeaderCapella != nil {
|
||||
want, err := blocks.WrappedExecutionPayloadHeaderCapella(tc.returnedHeaderCapella, 0) // value is a mock
|
||||
want, err := blocks.WrappedExecutionPayloadHeaderCapella(tc.returnedHeaderCapella, big.NewInt(197121)) // value is a mock
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, h)
|
||||
}
|
||||
@@ -805,7 +805,7 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
|
||||
})
|
||||
t.Run("could not get builder withdrawals root", func(t *testing.T) {
|
||||
local := &v1.ExecutionPayloadCapella{}
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
header := &v1.ExecutionPayloadHeader{}
|
||||
h, err := blocks.WrappedExecutionPayloadHeader(header)
|
||||
@@ -815,10 +815,10 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
|
||||
})
|
||||
t.Run("withdrawals mismatch", func(t *testing.T) {
|
||||
local := &v1.ExecutionPayloadCapella{}
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
header := &v1.ExecutionPayloadHeaderCapella{}
|
||||
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, 0)
|
||||
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
matched, err := matchingWithdrawalsRoot(p, h)
|
||||
require.NoError(t, err)
|
||||
@@ -832,13 +832,13 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
|
||||
Amount: 3,
|
||||
}}
|
||||
local := &v1.ExecutionPayloadCapella{Withdrawals: wds}
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
header := &v1.ExecutionPayloadHeaderCapella{}
|
||||
wr, err := ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
header.WithdrawalsRoot = wr[:]
|
||||
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, 0)
|
||||
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
matched, err := matchingWithdrawalsRoot(p, h)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -3,6 +3,7 @@ package validator
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -60,7 +61,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
||||
}))
|
||||
|
||||
capellaTransitionState, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&pb.ExecutionPayloadHeaderCapella{BlockNumber: 1}, 0)
|
||||
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&pb.ExecutionPayloadHeaderCapella{BlockNumber: 1}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, capellaTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderCapella))
|
||||
b2pbCapella := util.NewBeaconBlockCapella()
|
||||
|
||||
@@ -23,7 +23,7 @@ type chunkUpdateArgs struct {
|
||||
}
|
||||
|
||||
// Chunker defines a struct which represents a slice containing a chunk for K different validator's
|
||||
// min spans used for surround vote detection in slasher. The interface defines methods used to check
|
||||
// min/max spans used for surround vote detection in slasher. The interface defines methods used to check
|
||||
// if an attestation is slashable for a validator index based on the contents of
|
||||
// the chunk as well as the ability to update the data in the chunk with incoming information.
|
||||
type Chunker interface {
|
||||
@@ -153,12 +153,12 @@ func MaxChunkSpansSliceFrom(params *Parameters, chunk []uint16) (*MaxSpanChunksS
|
||||
|
||||
// NeutralElement for a min span chunks slice is undefined, in this case
|
||||
// using MaxUint16 as a sane value given that it is impossible to reach it.
|
||||
func (_ *MinSpanChunksSlice) NeutralElement() uint16 {
|
||||
func (*MinSpanChunksSlice) NeutralElement() uint16 {
|
||||
return math.MaxUint16
|
||||
}
|
||||
|
||||
// NeutralElement for a max span chunks slice is 0.
|
||||
func (_ *MaxSpanChunksSlice) NeutralElement() uint16 {
|
||||
func (*MaxSpanChunksSlice) NeutralElement() uint16 {
|
||||
return 0
|
||||
}
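
A loose sketch of why these neutral elements work (assumed semantics, simplified from the actual chunk update logic): a min span cell keeps the smallest relevant target epoch seen so far, so an untouched cell must start at the largest representable value, while a max span cell keeps the largest target and therefore starts at zero.

package slasherexample

// updateMinCell and updateMaxCell are illustrative only: each cell starts at
// the neutral element of its aggregation, so the first real observation
// always replaces it.
func updateMinCell(cell, observedTarget uint16) uint16 {
	if observedTarget < cell { // cell starts at math.MaxUint16
		return observedTarget
	}
	return cell
}

func updateMaxCell(cell, observedTarget uint16) uint16 {
	if observedTarget > cell { // cell starts at 0
		return observedTarget
	}
	return cell
}
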
|
||||
|
||||
@@ -191,12 +191,14 @@ func (m *MinSpanChunksSlice) CheckSlashable(
|
||||
) (*ethpb.AttesterSlashing, error) {
|
||||
sourceEpoch := attestation.IndexedAttestation.Data.Source.Epoch
|
||||
targetEpoch := attestation.IndexedAttestation.Data.Target.Epoch
|
||||
|
||||
minTarget, err := chunkDataAtEpoch(m.params, m.data, validatorIdx, sourceEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(
|
||||
err, "could not get min target for validator %d at epoch %d", validatorIdx, sourceEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
if targetEpoch > minTarget {
|
||||
existingAttRecord, err := slasherDB.AttestationRecordForValidator(
|
||||
ctx, validatorIdx, minTarget,
|
||||
@@ -206,16 +208,20 @@ func (m *MinSpanChunksSlice) CheckSlashable(
|
||||
err, "could not get existing attestation record at target %d", minTarget,
|
||||
)
|
||||
}
|
||||
if existingAttRecord != nil {
|
||||
if sourceEpoch < existingAttRecord.IndexedAttestation.Data.Source.Epoch {
|
||||
surroundingVotesTotal.Inc()
|
||||
return &ethpb.AttesterSlashing{
|
||||
Attestation_1: attestation.IndexedAttestation,
|
||||
Attestation_2: existingAttRecord.IndexedAttestation,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if existingAttRecord == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if sourceEpoch < existingAttRecord.IndexedAttestation.Data.Source.Epoch {
|
||||
surroundingVotesTotal.Inc()
|
||||
return &ethpb.AttesterSlashing{
|
||||
Attestation_1: attestation.IndexedAttestation,
|
||||
Attestation_2: existingAttRecord.IndexedAttestation,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
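
For reference, both the min-span check above and its max-span counterpart reduce to the usual surround comparison on epochs. A minimal standalone sketch of that predicate (not the chunk-based lookup used by the service):

package slasherexample

// surrounds reports whether attestation a surrounds attestation b, i.e.
// a.source < b.source and b.target < a.target. The min-span path detects an
// incoming attestation that surrounds an existing one; the max-span path
// detects an incoming attestation that is surrounded by an existing one.
func surrounds(aSource, aTarget, bSource, bTarget uint64) bool {
	return aSource < bSource && bTarget < aTarget
}
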
|
||||
|
||||
@@ -224,7 +230,7 @@ func (m *MinSpanChunksSlice) CheckSlashable(
|
||||
// within the max span chunks slice. Recall that for an incoming attestation, B, and an
|
||||
// existing attestation, A:
|
||||
//
|
||||
// B surrounds A if and only if B.target < max_spans[B.source]
|
||||
// B is surrounded by A if and only if B.target < max_spans[B.source]
|
||||
//
|
||||
// That is, this condition is sufficient to check if an incoming attestation
|
||||
// is surrounded by a previous one. We also check if we indeed have an existing
|
||||
@@ -238,12 +244,14 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
|
||||
) (*ethpb.AttesterSlashing, error) {
|
||||
sourceEpoch := attestation.IndexedAttestation.Data.Source.Epoch
|
||||
targetEpoch := attestation.IndexedAttestation.Data.Target.Epoch
|
||||
|
||||
maxTarget, err := chunkDataAtEpoch(m.params, m.data, validatorIdx, sourceEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(
|
||||
err, "could not get max target for validator %d at epoch %d", validatorIdx, sourceEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
if targetEpoch < maxTarget {
|
||||
existingAttRecord, err := slasherDB.AttestationRecordForValidator(
|
||||
ctx, validatorIdx, maxTarget,
|
||||
@@ -253,14 +261,17 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
|
||||
err, "could not get existing attestation record at target %d", maxTarget,
|
||||
)
|
||||
}
|
||||
if existingAttRecord != nil {
|
||||
if existingAttRecord.IndexedAttestation.Data.Source.Epoch < sourceEpoch {
|
||||
surroundedVotesTotal.Inc()
|
||||
return ðpb.AttesterSlashing{
|
||||
Attestation_1: existingAttRecord.IndexedAttestation,
|
||||
Attestation_2: attestation.IndexedAttestation,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if existingAttRecord == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if existingAttRecord.IndexedAttestation.Data.Source.Epoch < sourceEpoch {
|
||||
surroundedVotesTotal.Inc()
|
||||
return &ethpb.AttesterSlashing{
|
||||
Attestation_1: existingAttRecord.IndexedAttestation,
|
||||
Attestation_2: attestation.IndexedAttestation,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
@@ -331,8 +342,8 @@ func (m *MinSpanChunksSlice) Update(
|
||||
minEpoch = args.currentEpoch - (m.params.historyLength - 1)
|
||||
}
|
||||
epochInChunk := startEpoch
|
||||
// We go down the chunk for the validator, updating every value starting at start_epoch down to min_epoch.
|
||||
// As long as the epoch, e, in the same chunk index and e >= min_epoch, we proceed with
|
||||
// We go down the chunk for the validator, updating every value starting at startEpoch down to minEpoch.
|
||||
// As long as the epoch, e, is in the same chunk index and e >= minEpoch, we proceed with
|
||||
// a for loop.
|
||||
for m.params.chunkIndex(epochInChunk) == args.chunkIndex && epochInChunk >= minEpoch {
|
||||
var chunkTarget primitives.Epoch
|
||||
@@ -375,7 +386,7 @@ func (m *MaxSpanChunksSlice) Update(
|
||||
newTargetEpoch primitives.Epoch,
|
||||
) (keepGoing bool, err error) {
|
||||
epochInChunk := startEpoch
|
||||
// We go down the chunk for the validator, updating every value starting at start_epoch up to
|
||||
// We go down the chunk for the validator, updating every value starting at startEpoch up to
|
||||
// and including the current epoch. As long as the epoch, e, is in the same chunk index and e <= currentEpoch,
|
||||
// we proceed with a for loop.
|
||||
for m.params.chunkIndex(epochInChunk) == args.chunkIndex && epochInChunk <= args.currentEpoch {
|
||||
@@ -436,7 +447,7 @@ func (m *MinSpanChunksSlice) StartEpoch(
|
||||
|
||||
// StartEpoch given a source epoch and current epoch, determines the start epoch of
|
||||
// a max span chunk for use in chunk updates. The source epoch cannot be >= the current epoch.
|
||||
func (_ *MaxSpanChunksSlice) StartEpoch(
|
||||
func (*MaxSpanChunksSlice) StartEpoch(
|
||||
sourceEpoch, currentEpoch primitives.Epoch,
|
||||
) (epoch primitives.Epoch, exists bool) {
|
||||
if sourceEpoch >= currentEpoch {
|
||||
|
||||
@@ -20,6 +20,7 @@ func (s *Service) checkSlashableAttestations(
|
||||
) ([]*ethpb.AttesterSlashing, error) {
|
||||
slashings := make([]*ethpb.AttesterSlashing, 0)
|
||||
|
||||
// Double votes
|
||||
log.Debug("Checking for double votes")
|
||||
start := time.Now()
|
||||
doubleVoteSlashings, err := s.checkDoubleVotes(ctx, atts)
|
||||
@@ -29,42 +30,53 @@ func (s *Service) checkSlashableAttestations(
|
||||
log.WithField("elapsed", time.Since(start)).Debug("Done checking double votes")
|
||||
slashings = append(slashings, doubleVoteSlashings...)
|
||||
|
||||
// Surrounding / surrounded votes
|
||||
groupedAtts := s.groupByValidatorChunkIndex(atts)
|
||||
log.WithField("numBatches", len(groupedAtts)).Debug("Batching attestations by validator chunk index")
|
||||
start = time.Now()
|
||||
batchTimes := make([]time.Duration, 0, len(groupedAtts))
|
||||
|
||||
for validatorChunkIdx, batch := range groupedAtts {
|
||||
innerStart := time.Now()
|
||||
attSlashings, err := s.detectAllAttesterSlashings(ctx, &chunkUpdateArgs{
|
||||
|
||||
attSlashings, err := s.checkSurrounds(ctx, &chunkUpdateArgs{
|
||||
validatorChunkIndex: validatorChunkIdx,
|
||||
currentEpoch: currentEpoch,
|
||||
}, batch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slashings = append(slashings, attSlashings...)
|
||||
|
||||
indices := s.params.validatorIndicesInChunk(validatorChunkIdx)
|
||||
for _, idx := range indices {
|
||||
s.latestEpochWrittenForValidator[idx] = currentEpoch
|
||||
}
|
||||
|
||||
batchTimes = append(batchTimes, time.Since(innerStart))
|
||||
}
|
||||
var avgProcessingTimePerBatch time.Duration
|
||||
|
||||
avgProcessingTimePerBatch := time.Duration(0)
|
||||
for _, dur := range batchTimes {
|
||||
avgProcessingTimePerBatch += dur
|
||||
}
|
||||
|
||||
if avgProcessingTimePerBatch != time.Duration(0) {
|
||||
avgProcessingTimePerBatch = avgProcessingTimePerBatch / time.Duration(len(batchTimes))
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"numAttestations": len(atts),
|
||||
"numBatchesByValidatorChunkIndex": len(groupedAtts),
|
||||
"elapsed": time.Since(start),
|
||||
"avgBatchProcessingTime": avgProcessingTimePerBatch,
|
||||
}).Info("Done checking slashable attestations")
|
||||
|
||||
if len(slashings) > 0 {
|
||||
log.WithField("numSlashings", len(slashings)).Warn("Slashable attestation offenses found")
|
||||
}
|
||||
|
||||
return slashings, nil
|
||||
}
|
||||
|
||||
@@ -72,14 +84,13 @@ func (s *Service) checkSlashableAttestations(
|
||||
// as the current epoch in time, we perform slashing detection.
|
||||
// The process is as follows given a list of attestations:
|
||||
//
|
||||
// 1. Check for attester double votes using the list of attestations.
|
||||
// 2. Group the attestations by chunk index.
|
||||
// 3. Update the min and max spans for those grouped attestations, check if any slashings are
|
||||
// 1. Group the attestations by chunk index.
|
||||
// 2. Update the min and max spans for those grouped attestations, check if any slashings are
|
||||
// found in the process.
|
||||
// 4. Update the latest written epoch for all validators involved to the current epoch.
|
||||
// 3. Update the latest written epoch for all validators involved to the current epoch.
|
||||
//
|
||||
// This function performs a lot of critical actions and is split into smaller helpers for cleanliness.
|
||||
func (s *Service) detectAllAttesterSlashings(
|
||||
func (s *Service) checkSurrounds(
|
||||
ctx context.Context,
|
||||
args *chunkUpdateArgs,
|
||||
attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
@@ -136,48 +147,8 @@ func (s *Service) detectAllAttesterSlashings(
|
||||
return slashings, nil
|
||||
}
|
||||
|
||||
// Check for attester slashing double votes by looking at every single validator index
|
||||
// in each attestation's attesting indices and checking if there already exist records for such
|
||||
// attestation's target epoch. If so, we append a double vote slashing object to a list of slashings
|
||||
// we return to the caller.
|
||||
func (s *Service) checkDoubleVotes(
|
||||
ctx context.Context, attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
) ([]*ethpb.AttesterSlashing, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "Slasher.checkDoubleVotes")
|
||||
defer span.End()
|
||||
// We check if there are any slashable double votes in the input list
|
||||
// of attestations with respect to each other.
|
||||
slashings := make([]*ethpb.AttesterSlashing, 0)
|
||||
existingAtts := make(map[string]*slashertypes.IndexedAttestationWrapper)
|
||||
for _, att := range attestations {
|
||||
for _, valIdx := range att.IndexedAttestation.AttestingIndices {
|
||||
key := uintToString(uint64(att.IndexedAttestation.Data.Target.Epoch)) + ":" + uintToString(valIdx)
|
||||
existingAtt, ok := existingAtts[key]
|
||||
if !ok {
|
||||
existingAtts[key] = att
|
||||
continue
|
||||
}
|
||||
if att.SigningRoot != existingAtt.SigningRoot {
|
||||
doubleVotesTotal.Inc()
|
||||
slashings = append(slashings, &ethpb.AttesterSlashing{
|
||||
Attestation_1: existingAtt.IndexedAttestation,
|
||||
Attestation_2: att.IndexedAttestation,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We check if there are any slashable double votes in the input list
|
||||
// of attestations with respect to our database.
|
||||
moreSlashings, err := s.checkDoubleVotesOnDisk(ctx, attestations)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not check attestation double votes on disk")
|
||||
}
|
||||
return append(slashings, moreSlashings...), nil
|
||||
}
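
Whether performed in memory (as in the block removed above) or against the database, a double vote reduces to two attestations from the same validator for the same target epoch with different signing roots. A minimal standalone sketch of that predicate, using a simplified stand-in record type:

package slasherexample

type attRecord struct {
	ValidatorIndex uint64
	TargetEpoch    uint64
	SigningRoot    [32]byte
}

// isDoubleVote reports whether the two records vote for the same target epoch
// from the same validator while committing to different signing roots.
func isDoubleVote(a, b attRecord) bool {
	return a.ValidatorIndex == b.ValidatorIndex &&
		a.TargetEpoch == b.TargetEpoch &&
		a.SigningRoot != b.SigningRoot
}
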
|
||||
|
||||
// Check for double votes in our database given a list of incoming attestations.
|
||||
func (s *Service) checkDoubleVotesOnDisk(
|
||||
func (s *Service) checkDoubleVotes(
|
||||
ctx context.Context, attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
) ([]*ethpb.AttesterSlashing, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "Slasher.checkDoubleVotesOnDisk")
|
||||
|
||||
@@ -30,10 +30,32 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
currentEpoch primitives.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
shouldNotBeSlashable bool
|
||||
name string
|
||||
args args
|
||||
shouldBeSlashable bool
|
||||
}{
|
||||
{
|
||||
name: "Same target with different signing roots",
|
||||
args: args{
|
||||
attestationQueue: []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(t, 1, 2, []uint64{0, 1}, []byte{1}),
|
||||
createAttestationWrapper(t, 1, 2, []uint64{0, 1}, []byte{2}),
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldBeSlashable: true,
|
||||
},
|
||||
{
|
||||
name: "Same target with same signing roots",
|
||||
args: args{
|
||||
attestationQueue: []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(t, 1, 2, []uint64{0, 1}, []byte{1}),
|
||||
createAttestationWrapper(t, 1, 2, []uint64{0, 1}, []byte{1}),
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Detects surrounding vote (source 1, target 2), (source 0, target 3)",
|
||||
args: args{
|
||||
@@ -43,6 +65,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldBeSlashable: true,
|
||||
},
|
||||
{
|
||||
name: "Detects surrounding vote (source 50, target 51), (source 0, target 1000)",
|
||||
@@ -53,6 +76,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 1000,
|
||||
},
|
||||
shouldBeSlashable: true,
|
||||
},
|
||||
{
|
||||
name: "Detects surrounded vote (source 0, target 3), (source 1, target 2)",
|
||||
@@ -63,6 +87,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldBeSlashable: true,
|
||||
},
|
||||
{
|
||||
name: "Detects double vote, (source 1, target 2), (source 0, target 2)",
|
||||
@@ -73,6 +98,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldBeSlashable: true,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, surrounding but non-overlapping attesting indices within same validator chunk index",
|
||||
@@ -83,7 +109,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, surrounded but non-overlapping attesting indices within same validator chunk index",
|
||||
@@ -94,7 +120,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, surrounding but non-overlapping attesting indices in different validator chunk index",
|
||||
@@ -111,7 +137,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, surrounded but non-overlapping attesting indices in different validator chunk index",
|
||||
@@ -128,7 +154,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, (source 1, target 2), (source 2, target 3)",
|
||||
@@ -139,7 +165,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, (source 0, target 3), (source 2, target 4)",
|
||||
@@ -150,7 +176,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, (source 0, target 2), (source 0, target 3)",
|
||||
@@ -161,7 +187,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
{
|
||||
name: "Not slashable, (source 0, target 3), (source 0, target 2)",
|
||||
@@ -172,7 +198,7 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
},
|
||||
currentEpoch: 4,
|
||||
},
|
||||
shouldNotBeSlashable: true,
|
||||
shouldBeSlashable: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -246,21 +272,24 @@ func Test_processQueuedAttestations(t *testing.T) {
|
||||
s.genesisTime = genesisTime
|
||||
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedAttestations(ctx, currentSlotChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
s.attsQueue.extend(tt.args.attestationQueue)
|
||||
currentSlotChan <- slot
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
cancel()
|
||||
<-exitChan
|
||||
if tt.shouldNotBeSlashable {
|
||||
require.LogsDoNotContain(t, hook, "Attester slashing detected")
|
||||
} else {
|
||||
s.wg.Wait()
|
||||
if tt.shouldBeSlashable {
|
||||
require.LogsContain(t, hook, "Attester slashing detected")
|
||||
} else {
|
||||
require.LogsDoNotContain(t, hook, "Attester slashing detected")
|
||||
}
|
||||
|
||||
require.LogsDoNotContain(t, hook, couldNotSaveAttRecord)
|
||||
require.LogsDoNotContain(t, hook, couldNotCheckSlashableAtt)
|
||||
require.LogsDoNotContain(t, hook, couldNotProcessAttesterSlashings)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -304,10 +333,9 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) {
|
||||
s.genesisTime = genesisTime
|
||||
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedAttestations(ctx, currentSlotChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
|
||||
for i := startEpoch; i <= endEpoch; i++ {
|
||||
@@ -331,7 +359,7 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) {
|
||||
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
require.LogsDoNotContain(t, hook, "Slashable offenses found")
|
||||
require.LogsDoNotContain(t, hook, "Could not detect")
|
||||
}
|
||||
@@ -370,10 +398,9 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) {
|
||||
s.genesisTime = genesisTime
|
||||
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedAttestations(ctx, currentSlotChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
|
||||
// We create two attestations fully spanning chunk indices 0 and 1
|
||||
@@ -392,7 +419,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) {
|
||||
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
require.LogsDoNotContain(t, hook, "Slashable offenses found")
|
||||
require.LogsDoNotContain(t, hook, "Could not detect")
|
||||
}
|
||||
@@ -593,44 +620,6 @@ func Test_applyAttestationForValidator_MaxSpanChunk(t *testing.T) {
|
||||
require.NotNil(t, slashing)
|
||||
}
|
||||
|
||||
func Test_checkDoubleVotes_SlashableInputAttestations(t *testing.T) {
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
ctx := context.Background()
|
||||
// For a list of input attestations, check that we can
|
||||
// indeed check there could exist a double vote offense
|
||||
// within the list with respect to other entries in the list.
|
||||
atts := []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(t, 0, 1, []uint64{1, 2}, []byte{1}),
|
||||
createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{1}),
|
||||
createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{2}), // Different signing root.
|
||||
}
|
||||
srv, err := New(context.Background(),
|
||||
&ServiceConfig{
|
||||
Database: slasherDB,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
ClockWaiter: startup.NewClockSynchronizer(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
prev1 := createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{1})
|
||||
cur1 := createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{2})
|
||||
prev2 := createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{1})
|
||||
cur2 := createAttestationWrapper(t, 0, 2, []uint64{1, 2}, []byte{2})
|
||||
wanted := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: prev1.IndexedAttestation,
|
||||
Attestation_2: cur1.IndexedAttestation,
|
||||
},
|
||||
{
|
||||
Attestation_1: prev2.IndexedAttestation,
|
||||
Attestation_2: cur2.IndexedAttestation,
|
||||
},
|
||||
}
|
||||
slashings, err := srv.checkDoubleVotes(ctx, atts)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wanted, slashings)
|
||||
}
|
||||
|
||||
func Test_checkDoubleVotes_SlashableAttestationsOnDisk(t *testing.T) {
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
ctx := context.Background()
|
||||
@@ -787,16 +776,15 @@ func TestService_processQueuedAttestations(t *testing.T) {
|
||||
})
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
tickerChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedAttestations(ctx, tickerChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
|
||||
// Send a value over the ticker.
|
||||
tickerChan <- 1
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
assert.LogsContain(t, hook, "Processing queued")
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -13,80 +12,70 @@ import (
|
||||
// detectProposerSlashings takes in signed block header wrappers and returns a list of proposer slashings detected.
|
||||
func (s *Service) detectProposerSlashings(
|
||||
ctx context.Context,
|
||||
proposedBlocks []*slashertypes.SignedBlockHeaderWrapper,
|
||||
incomingProposals []*slashertypes.SignedBlockHeaderWrapper,
|
||||
) ([]*ethpb.ProposerSlashing, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "slasher.detectProposerSlashings")
|
||||
defer span.End()
|
||||
|
||||
// internalSlashings will contain any slashable double proposals in the input list
|
||||
// of proposals with respect to each other.
|
||||
internalSlashings := []*ethpb.ProposerSlashing{}
|
||||
|
||||
existingProposals := make(map[string]*slashertypes.SignedBlockHeaderWrapper)
|
||||
|
||||
// We check if there are any slashable double proposals in the input list
|
||||
// of proposals with respect to each other.
|
||||
slashings := make([]*ethpb.ProposerSlashing, 0)
|
||||
existingProposals := make(map[string]*slashertypes.SignedBlockHeaderWrapper)
|
||||
for i, proposal := range proposedBlocks {
|
||||
key := proposalKey(proposal)
|
||||
for _, incomingProposal := range incomingProposals {
|
||||
key := proposalKey(incomingProposal)
|
||||
existingProposal, ok := existingProposals[key]
|
||||
|
||||
// If we have not seen this proposal before, we add it to our map of existing proposals
|
||||
// and we continue to the next proposal.
|
||||
if !ok {
|
||||
existingProposals[key] = proposal
|
||||
existingProposals[key] = incomingProposal
|
||||
continue
|
||||
}
|
||||
if isDoubleProposal(proposedBlocks[i].SigningRoot, existingProposal.SigningRoot) {
|
||||
|
||||
// If we have seen this proposal before, we check if it is a double proposal.
|
||||
if isDoubleProposal(incomingProposal.SigningRoot, existingProposal.SigningRoot) {
|
||||
doubleProposalsTotal.Inc()
|
||||
|
||||
slashing := ðpb.ProposerSlashing{
|
||||
Header_1: existingProposal.SignedBeaconBlockHeader,
|
||||
Header_2: proposedBlocks[i].SignedBeaconBlockHeader,
|
||||
Header_2: incomingProposal.SignedBeaconBlockHeader,
|
||||
}
|
||||
slashings = append(slashings, slashing)
|
||||
|
||||
internalSlashings = append(internalSlashings, slashing)
|
||||
}
|
||||
}
|
||||
|
||||
proposerSlashings, err := s.serviceCfg.Database.CheckDoubleBlockProposals(ctx, proposedBlocks)
|
||||
// We check if there are any slashable double proposals in the input list
|
||||
// of proposals with respect to the slasher database.
|
||||
databaseSlashings, err := s.serviceCfg.Database.CheckDoubleBlockProposals(ctx, incomingProposals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not check for double proposals on disk")
|
||||
}
|
||||
if err := s.saveSafeProposals(ctx, proposedBlocks, proposerSlashings); err != nil {
|
||||
|
||||
// We save the safe proposals (with respect to the database) to our database.
|
||||
// If some proposals in incomingProposals are slashable with respect to each other,
|
||||
// we (arbitrarily) save the last one to the database.
|
||||
if err := s.serviceCfg.Database.SaveBlockProposals(ctx, incomingProposals); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save safe proposals")
|
||||
}
|
||||
slashings = append(slashings, proposerSlashings...)
|
||||
return slashings, nil
|
||||
}
|
||||
|
||||
// Check for double proposals in our database given a list of incoming block proposals.
|
||||
// For the proposals that were not slashable, we save them to the database.
|
||||
func (s *Service) saveSafeProposals(
|
||||
ctx context.Context,
|
||||
proposedBlocks []*slashertypes.SignedBlockHeaderWrapper,
|
||||
proposerSlashings []*ethpb.ProposerSlashing,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "slasher.saveSafeProposals")
|
||||
defer span.End()
|
||||
return s.serviceCfg.Database.SaveBlockProposals(
|
||||
ctx,
|
||||
filterSafeProposals(proposedBlocks, proposerSlashings),
|
||||
)
|
||||
}
|
||||
|
||||
func filterSafeProposals(
|
||||
proposedBlocks []*slashertypes.SignedBlockHeaderWrapper,
|
||||
proposerSlashings []*ethpb.ProposerSlashing,
|
||||
) []*slashertypes.SignedBlockHeaderWrapper {
|
||||
// We initialize a map of proposers that are safe from slashing.
|
||||
safeProposers := make(map[primitives.ValidatorIndex]*slashertypes.SignedBlockHeaderWrapper, len(proposedBlocks))
|
||||
for _, proposal := range proposedBlocks {
|
||||
safeProposers[proposal.SignedBeaconBlockHeader.Header.ProposerIndex] = proposal
|
||||
}
|
||||
for _, doubleProposal := range proposerSlashings {
|
||||
// If a proposer is found to have committed a slashable offense, we delete
|
||||
// them from the safe proposers map.
|
||||
delete(safeProposers, doubleProposal.Header_1.Header.ProposerIndex)
|
||||
}
|
||||
// We save all the proposals that are determined "safe" and not-slashable to our database.
|
||||
safeProposals := make([]*slashertypes.SignedBlockHeaderWrapper, 0, len(safeProposers))
|
||||
for _, proposal := range safeProposers {
|
||||
safeProposals = append(safeProposals, proposal)
|
||||
}
|
||||
return safeProposals
|
||||
|
||||
// totalSlashings contain all slashings we have detected.
|
||||
totalSlashings := append(internalSlashings, databaseSlashings...)
|
||||
return totalSlashings, nil
|
||||
}
|
||||
|
||||
// proposalKey build a key which is a combination of the slot and the proposer index.
|
||||
// If a validator proposes several blocks for the same slot, then several (potentially slashable)
|
||||
// proposals will correspond to the same key.
|
||||
func proposalKey(proposal *slashertypes.SignedBlockHeaderWrapper) string {
|
||||
header := proposal.SignedBeaconBlockHeader.Header
|
||||
return uintToString(uint64(header.Slot)) + ":" + uintToString(uint64(header.ProposerIndex))
|
||||
|
||||
slotKey := uintToString(uint64(header.Slot))
|
||||
proposerIndexKey := uintToString(uint64(header.ProposerIndex))
|
||||
|
||||
return slotKey + ":" + proposerIndexKey
|
||||
}
|
||||
|
||||
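For readers skimming the hunk above: the detection pattern is to key every incoming proposal by (slot, proposer index) and flag any second proposal under the same key whose signing root differs. The following is a self-contained sketch of that idea using simplified stand-in types, not the slasher's actual API.

package main

import "fmt"

// proposal is a simplified stand-in for slashertypes.SignedBlockHeaderWrapper.
type proposal struct {
	Slot          uint64
	ProposerIndex uint64
	SigningRoot   [32]byte
}

// key mirrors proposalKey above: one key per (slot, proposer index) pair.
func key(p proposal) string {
	return fmt.Sprintf("%d:%d", p.Slot, p.ProposerIndex)
}

// detectDoubles returns the pairs of proposals within a single batch that share
// a key but have different signing roots, i.e. slashable double proposals.
func detectDoubles(incoming []proposal) [][2]proposal {
	seen := make(map[string]proposal)
	var doubles [][2]proposal
	for _, p := range incoming {
		k := key(p)
		prev, ok := seen[k]
		if !ok {
			seen[k] = p
			continue
		}
		if prev.SigningRoot != p.SigningRoot {
			doubles = append(doubles, [2]proposal{prev, p})
		}
	}
	return doubles
}

func main() {
	a := proposal{Slot: 4, ProposerIndex: 1, SigningRoot: [32]byte{1}}
	b := proposal{Slot: 4, ProposerIndex: 1, SigningRoot: [32]byte{2}}
	fmt.Println(len(detectDoubles([]proposal{a, a, b}))) // prints 1
}
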
@@ -22,94 +22,139 @@ import (
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
type wrapped struct {
|
||||
slot primitives.Slot
|
||||
signedBlkHeaders []*slashertypes.SignedBlockHeaderWrapper
|
||||
}
|
||||
|
||||
func Test_processQueuedBlocks_DetectsDoubleProposals(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initialize validators in the state.
|
||||
numVals := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
validators := make([]*ethpb.Validator, numVals)
|
||||
privKeys := make([]bls.SecretKey, numVals)
|
||||
for i := range validators {
|
||||
privKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = privKey
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: privKey.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
err = beaconState.SetValidators(validators)
|
||||
require.NoError(t, err)
|
||||
domain, err := signing.Domain(
|
||||
beaconState.Fork(),
|
||||
0,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChain := &mock.ChainService{
|
||||
State: beaconState,
|
||||
}
|
||||
s := &Service{
|
||||
serviceCfg: &ServiceConfig{
|
||||
Database: slasherDB,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
HeadStateFetcher: mockChain,
|
||||
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
SlashingPoolInserter: &slashingsmock.PoolMock{},
|
||||
ClockWaiter: startup.NewClockSynchronizer(),
|
||||
testCases := []struct {
|
||||
name string
|
||||
wraps []wrapped
|
||||
}{
|
||||
{
|
||||
name: "detects double proposals in the same batch",
|
||||
wraps: []wrapped{
|
||||
{
|
||||
4,
|
||||
[]*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{2}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "detects double proposals in the different batches",
|
||||
wraps: []wrapped{
|
||||
{
|
||||
5,
|
||||
[]*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 5, 1, []byte{1}),
|
||||
},
|
||||
},
|
||||
{
|
||||
6,
|
||||
[]*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 4, 1, []byte{2}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
params: DefaultParams(),
|
||||
blksQueue: newBlocksQueue(),
|
||||
}
|
||||
|
||||
parentRoot := bytesutil.ToBytes32([]byte("parent"))
|
||||
err = s.serviceCfg.StateGen.SaveState(ctx, parentRoot, beaconState)
|
||||
require.NoError(t, err)
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
go func() {
|
||||
s.processQueuedBlocks(ctx, currentSlotChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
signedBlkHeaders := []*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
createProposalWrapper(t, 4, 1, []byte{2}),
|
||||
// Initialize validators in the state.
|
||||
numVals := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
validators := make([]*ethpb.Validator, numVals)
|
||||
privKeys := make([]bls.SecretKey, numVals)
|
||||
for i := range validators {
|
||||
privKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = privKey
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: privKey.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
err = beaconState.SetValidators(validators)
|
||||
require.NoError(t, err)
|
||||
domain, err := signing.Domain(
|
||||
beaconState.Fork(),
|
||||
0,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChain := &mock.ChainService{
|
||||
State: beaconState,
|
||||
}
|
||||
s := &Service{
|
||||
serviceCfg: &ServiceConfig{
|
||||
Database: slasherDB,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
HeadStateFetcher: mockChain,
|
||||
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
SlashingPoolInserter: &slashingsmock.PoolMock{},
|
||||
ClockWaiter: startup.NewClockSynchronizer(),
|
||||
},
|
||||
params: DefaultParams(),
|
||||
blksQueue: newBlocksQueue(),
|
||||
}
|
||||
|
||||
parentRoot := bytesutil.ToBytes32([]byte("parent"))
|
||||
err = s.serviceCfg.StateGen.SaveState(ctx, parentRoot, beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedBlocks(ctx, currentSlotChan)
|
||||
}()
|
||||
|
||||
for _, wrap := range tt.wraps {
|
||||
// Add valid signatures to the block headers we are testing.
|
||||
for _, proposalWrapper := range wrap.signedBlkHeaders {
|
||||
proposalWrapper.SignedBeaconBlockHeader.Header.ParentRoot = parentRoot[:]
|
||||
headerHtr, err := proposalWrapper.SignedBeaconBlockHeader.Header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &ethpb.SigningData{
|
||||
ObjectRoot: headerHtr[:],
|
||||
Domain: domain,
|
||||
}
|
||||
|
||||
signingRoot, err := container.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
privKey := privKeys[proposalWrapper.SignedBeaconBlockHeader.Header.ProposerIndex]
|
||||
proposalWrapper.SignedBeaconBlockHeader.Signature = privKey.Sign(signingRoot[:]).Marshal()
|
||||
}
|
||||
|
||||
s.blksQueue.extend(wrap.signedBlkHeaders)
|
||||
|
||||
currentSlot := primitives.Slot(4)
|
||||
currentSlotChan <- currentSlot
|
||||
}
|
||||
|
||||
cancel()
|
||||
s.wg.Wait()
|
||||
require.LogsContain(t, hook, "Proposer slashing detected")
|
||||
})
|
||||
}
|
||||
|
||||
// Add valid signatures to the block headers we are testing.
|
||||
for _, proposalWrapper := range signedBlkHeaders {
|
||||
proposalWrapper.SignedBeaconBlockHeader.Header.ParentRoot = parentRoot[:]
|
||||
headerHtr, err := proposalWrapper.SignedBeaconBlockHeader.Header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
container := &ethpb.SigningData{
|
||||
ObjectRoot: headerHtr[:],
|
||||
Domain: domain,
|
||||
}
|
||||
signingRoot, err := container.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
privKey := privKeys[proposalWrapper.SignedBeaconBlockHeader.Header.ProposerIndex]
|
||||
proposalWrapper.SignedBeaconBlockHeader.Signature = privKey.Sign(signingRoot[:]).Marshal()
|
||||
}
|
||||
|
||||
s.blksQueue.extend(signedBlkHeaders)
|
||||
|
||||
currentSlot := primitives.Slot(4)
|
||||
currentSlotChan <- currentSlot
|
||||
cancel()
|
||||
<-exitChan
|
||||
require.LogsContain(t, hook, "Proposer slashing detected")
|
||||
}
|
||||
|
||||
func Test_processQueuedBlocks_NotSlashable(t *testing.T) {
|
||||
@@ -137,10 +182,9 @@ func Test_processQueuedBlocks_NotSlashable(t *testing.T) {
|
||||
blksQueue: newBlocksQueue(),
|
||||
}
|
||||
currentSlotChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedBlocks(ctx, currentSlotChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
s.blksQueue.extend([]*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 4, 1, []byte{1}),
|
||||
@@ -148,7 +192,7 @@ func Test_processQueuedBlocks_NotSlashable(t *testing.T) {
|
||||
})
|
||||
currentSlotChan <- currentSlot
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
require.LogsDoNotContain(t, hook, "Proposer slashing detected")
|
||||
}
|
||||
|
||||
|
||||
@@ -52,12 +52,12 @@ func (s *Service) groupByChunkIndex(
// This function returns a list of valid attestations, a list of attestations that are
// valid in the future, and the number of attestations dropped.
func (s *Service) filterAttestations(
atts []*slashertypes.IndexedAttestationWrapper, currentEpoch primitives.Epoch,
attWrappers []*slashertypes.IndexedAttestationWrapper, currentEpoch primitives.Epoch,
) (valid, validInFuture []*slashertypes.IndexedAttestationWrapper, numDropped int) {
valid = make([]*slashertypes.IndexedAttestationWrapper, 0, len(atts))
validInFuture = make([]*slashertypes.IndexedAttestationWrapper, 0, len(atts))
valid = make([]*slashertypes.IndexedAttestationWrapper, 0, len(attWrappers))
validInFuture = make([]*slashertypes.IndexedAttestationWrapper, 0, len(attWrappers))

for _, attWrapper := range atts {
for _, attWrapper := range attWrappers {
if attWrapper == nil || !validateAttestationIntegrity(attWrapper.IndexedAttestation) {
numDropped++
continue
@@ -73,18 +73,19 @@ func (s *Service) filterAttestations(
// If an attestation's target epoch is in the future, we defer processing for later.
if attWrapper.IndexedAttestation.Data.Target.Epoch > currentEpoch {
validInFuture = append(validInFuture, attWrapper)
} else {
valid = append(valid, attWrapper)
continue
}

// The attestation is valid.
valid = append(valid, attWrapper)
}
return
}

// Validates the attestation data integrity, ensuring we have no nil values for
// source, epoch, and that the source epoch of the attestation must be less than
// the target epoch, which is a precondition for performing slashing detection.
// This function also checks the attestation source epoch is within the history size
// we keep track of for slashing detection.
// source and target epochs, and that the source epoch of the attestation must
// be less than the target epoch, which is a precondition for performing slashing
// detection (except for the genesis epoch).
func validateAttestationIntegrity(att *ethpb.IndexedAttestation) bool {
// If an attestation is malformed, we drop it.
if att == nil ||
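The comment above compresses the integrity rules into a couple of sentences. A minimal, hypothetical predicate capturing the epoch check might look like the sketch below; the real function continues beyond this hunk and operates on the full IndexedAttestation type, so this is illustrative only.

// attestationEpochsPlausible sketches the source/target rule described above:
// the source epoch must be strictly less than the target epoch, with the
// genesis epoch (source == target == 0) as the only tolerated exception.
func attestationEpochsPlausible(sourceEpoch, targetEpoch uint64) bool {
	if sourceEpoch == 0 && targetEpoch == 0 {
		return true // genesis-epoch exception
	}
	return sourceEpoch < targetEpoch
}
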
@@ -10,16 +10,10 @@ import (
// To properly access the element at epoch `e` for a validator index `i`, we leverage helper
// functions from these parameter values as nice abstractions. the following parameters are
// required for the helper functions defined in this file.
//
// (C) chunkSize defines how many elements are in a chunk for a validator
// min or max span slice.
// (K) validatorChunkSize defines how many validators' chunks we store in a single
// flat byte slice on disk.
// (H) historyLength defines how many epochs we keep of min or max spans.
type Parameters struct {
chunkSize uint64
validatorChunkSize uint64
historyLength primitives.Epoch
chunkSize uint64 // C - defines how many elements are in a chunk for a validator min or max span slice.
validatorChunkSize uint64 // K - defines how many validators' chunks we store in a single flat byte slice on disk.
historyLength primitives.Epoch // H - defines how many epochs we keep of min or max spans.
}

// DefaultParams defines default values for slasher's important parameters, defined
@@ -98,8 +92,8 @@ func (p *Parameters) lastEpoch(chunkIndex uint64) primitives.Epoch {
// with (validatorIndex % K)*C + (epoch % C), which gives us:
//
// (2 % 3)*3 + (1 % 3) =
// (2*3) + (1) =
// 7
// 2*3 + 1 =
// 7
//
// val0 val1 val2
// | | |
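The arithmetic in the comment above is compact enough to restate as a standalone helper. This is an illustrative function with assumed parameter names, not the package's actual accessor:

// cellIndex reproduces the formula from the comment above:
// (validatorIndex % K)*C + (epoch % C), the position of a (validator, epoch)
// cell inside one flat chunk slice.
func cellIndex(validatorIndex, epoch, c, k uint64) uint64 {
	return (validatorIndex%k)*c + epoch%c
}

// With C = 3, K = 3, validator index 2 and epoch 1 this yields
// (2%3)*3 + (1%3) = 7, matching the worked example in the comment.
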
@@ -12,10 +12,18 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
couldNotSaveAttRecord = "Could not save attestation records to DB"
|
||||
couldNotCheckSlashableAtt = "Could not check slashable attestations"
|
||||
couldNotProcessAttesterSlashings = "Could not process attester slashings"
|
||||
)
|
||||
|
||||
// Receive indexed attestations from some source event feed,
|
||||
// validating their integrity before appending them to an attestation queue
|
||||
// for batch processing in a separate routine.
|
||||
func (s *Service) receiveAttestations(ctx context.Context, indexedAttsChan chan *ethpb.IndexedAttestation) {
|
||||
defer s.wg.Done()
|
||||
|
||||
sub := s.serviceCfg.IndexedAttestationsFeed.Subscribe(indexedAttsChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
@@ -45,6 +53,8 @@ func (s *Service) receiveAttestations(ctx context.Context, indexedAttsChan chan
|
||||
|
||||
// Receive beacon blocks from some source event feed,
|
||||
func (s *Service) receiveBlocks(ctx context.Context, beaconBlockHeadersChan chan *ethpb.SignedBeaconBlockHeader) {
|
||||
defer s.wg.Done()
|
||||
|
||||
sub := s.serviceCfg.BeaconBlockHeadersFeed.Subscribe(beaconBlockHeadersChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
@@ -77,6 +87,8 @@ func (s *Service) receiveBlocks(ctx context.Context, beaconBlockHeadersChan chan
|
||||
// This grouping will allow us to perform detection on batches of attestations
|
||||
// per validator chunk index which can be done concurrently.
|
||||
func (s *Service) processQueuedAttestations(ctx context.Context, slotTicker <-chan primitives.Slot) {
|
||||
defer s.wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case currentSlot := <-slotTicker:
|
||||
@@ -101,24 +113,26 @@ func (s *Service) processQueuedAttestations(ctx context.Context, slotTicker <-ch
|
||||
}).Info("Processing queued attestations for slashing detection")
|
||||
|
||||
// Save the attestation records to our database.
|
||||
// If multiple attestations are provided for the same validator index + target epoch combination,
|
||||
// then last (validator index + target epoch) => signing root) link is kept into the database.
|
||||
if err := s.serviceCfg.Database.SaveAttestationRecordsForValidators(
|
||||
ctx, validAtts,
|
||||
); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation records to DB")
|
||||
log.WithError(err).Error(couldNotSaveAttRecord)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for slashings.
|
||||
slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAtts)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not check slashable attestations")
|
||||
log.WithError(err).Error(couldNotCheckSlashableAtt)
|
||||
continue
|
||||
}
|
||||
|
||||
// Process attester slashings by verifying their signatures, submitting
|
||||
// to the beacon node's operations pool, and logging them.
|
||||
if err := s.processAttesterSlashings(ctx, slashings); err != nil {
|
||||
log.WithError(err).Error("Could not process attester slashings")
|
||||
log.WithError(err).Error(couldNotProcessAttesterSlashings)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -132,6 +146,8 @@ func (s *Service) processQueuedAttestations(ctx context.Context, slotTicker <-ch
|
||||
// Process queued blocks every time an epoch ticker fires. We retrieve
|
||||
// these blocks from a queue, then perform double proposal detection.
|
||||
func (s *Service) processQueuedBlocks(ctx context.Context, slotTicker <-chan primitives.Slot) {
|
||||
defer s.wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case currentSlot := <-slotTicker:
|
||||
@@ -172,6 +188,8 @@ func (s *Service) processQueuedBlocks(ctx context.Context, slotTicker <-chan pri
|
||||
|
||||
// Prunes slasher data on each slot tick to prevent unnecessary build-up of disk space usage.
|
||||
func (s *Service) pruneSlasherData(ctx context.Context, slotTicker <-chan primitives.Slot) {
|
||||
defer s.wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker:
|
||||
|
||||
@@ -32,10 +32,9 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) {
|
||||
indexedAttsChan := make(chan *ethpb.IndexedAttestation)
|
||||
defer close(indexedAttsChan)
|
||||
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.receiveAttestations(ctx, indexedAttsChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
firstIndices := []uint64{1, 2, 3}
|
||||
secondIndices := []uint64{4, 5, 6}
|
||||
@@ -44,7 +43,7 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) {
|
||||
indexedAttsChan <- att1.IndexedAttestation
|
||||
indexedAttsChan <- att2.IndexedAttestation
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
wanted := []*slashertypes.IndexedAttestationWrapper{
|
||||
att1,
|
||||
att2,
|
||||
@@ -216,11 +215,9 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) {
|
||||
indexedAttsChan := make(chan *ethpb.IndexedAttestation)
|
||||
defer close(indexedAttsChan)
|
||||
|
||||
exitChan := make(chan struct{})
|
||||
defer close(exitChan)
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.receiveAttestations(ctx, indexedAttsChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
firstIndices := []uint64{1, 2, 3}
|
||||
secondIndices := []uint64{4, 5, 6}
|
||||
@@ -233,7 +230,7 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) {
|
||||
AttestingIndices: secondIndices,
|
||||
}
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
// Expect only a single, valid attestation was added to the queue.
|
||||
require.Equal(t, 1, s.attsQueue.size())
|
||||
wanted := []*slashertypes.IndexedAttestationWrapper{
|
||||
@@ -254,10 +251,9 @@ func TestSlasher_receiveBlocks_OK(t *testing.T) {
|
||||
}
|
||||
beaconBlockHeadersChan := make(chan *ethpb.SignedBeaconBlockHeader)
|
||||
defer close(beaconBlockHeadersChan)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.receiveBlocks(ctx, beaconBlockHeadersChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
|
||||
block1 := createProposalWrapper(t, 0, 1, nil).SignedBeaconBlockHeader
|
||||
@@ -265,7 +261,7 @@ func TestSlasher_receiveBlocks_OK(t *testing.T) {
|
||||
beaconBlockHeadersChan <- block1
|
||||
beaconBlockHeadersChan <- block2
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
wanted := []*slashertypes.SignedBlockHeaderWrapper{
|
||||
createProposalWrapper(t, 0, block1.Header.ProposerIndex, nil),
|
||||
createProposalWrapper(t, 0, block2.Header.ProposerIndex, nil),
|
||||
@@ -301,15 +297,14 @@ func TestService_processQueuedBlocks(t *testing.T) {
|
||||
})
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
tickerChan := make(chan primitives.Slot)
|
||||
exitChan := make(chan struct{})
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
s.processQueuedBlocks(ctx, tickerChan)
|
||||
exitChan <- struct{}{}
|
||||
}()
|
||||
|
||||
// Send a value over the ticker.
|
||||
tickerChan <- 0
|
||||
cancel()
|
||||
<-exitChan
|
||||
s.wg.Wait()
|
||||
assert.LogsContain(t, hook, "Processing queued")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package slasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
beaconChainSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -38,7 +39,7 @@ type ServiceConfig struct {
|
||||
StateGen stategen.StateManager
|
||||
SlashingPoolInserter slashings.PoolInserter
|
||||
HeadStateFetcher blockchain.HeadFetcher
|
||||
SyncChecker sync.Checker
|
||||
SyncChecker beaconChainSync.Checker
|
||||
ClockWaiter startup.ClockWaiter
|
||||
}
|
||||
|
||||
@@ -67,6 +68,7 @@ type Service struct {
|
||||
blocksSlotTicker *slots.SlotTicker
|
||||
pruningSlotTicker *slots.SlotTicker
|
||||
latestEpochWrittenForValidator map[primitives.ValidatorIndex]primitives.Epoch
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// New instantiates a new slasher from configuration values.
|
||||
@@ -126,21 +128,33 @@ func (s *Service) run() {
|
||||
|
||||
indexedAttsChan := make(chan *ethpb.IndexedAttestation, 1)
|
||||
beaconBlockHeadersChan := make(chan *ethpb.SignedBeaconBlockHeader, 1)
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.receiveAttestations(s.ctx, indexedAttsChan)
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.receiveBlocks(s.ctx, beaconBlockHeadersChan)
|
||||
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
s.attsSlotTicker = slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
|
||||
s.blocksSlotTicker = slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
|
||||
s.pruningSlotTicker = slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.processQueuedAttestations(s.ctx, s.attsSlotTicker.C())
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.processQueuedBlocks(s.ctx, s.blocksSlotTicker.C())
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.pruneSlasherData(s.ctx, s.pruningSlotTicker.C())
|
||||
}
|
||||
|
||||
// Stop the slasher service.
|
||||
func (s *Service) Stop() error {
|
||||
s.cancel()
|
||||
s.wg.Wait()
|
||||
|
||||
if s.attsSlotTicker != nil {
|
||||
s.attsSlotTicker.Done()
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
|
||||
if err := validateElements(field, fieldInfo, elements, length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
numOfElems := 0
|
||||
var numOfElems int
|
||||
if val, ok := elements.(sliceAccessor); ok {
|
||||
numOfElems = val.Len(val.State())
|
||||
} else {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
@@ -22,10 +24,10 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (interfaces.ExecutionData,
|
||||
}
|
||||
|
||||
if b.version == version.Capella {
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), 0)
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), big.NewInt(0))
|
||||
}
|
||||
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), 0)
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), big.NewInt(0))
|
||||
}
|
||||
|
||||
// latestExecutionPayloadHeaderVal of the beacon state.
|
||||
|
||||
@@ -2,6 +2,7 @@ package state_native_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
@@ -253,7 +254,7 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetInactivityScores([]uint64{1, 2, 3}))
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee("current")))
|
||||
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee("next")))
|
||||
wrappedHeader, err := blocks.WrappedExecutionPayloadHeaderCapella(executionPayloadHeaderCapella(), 0)
|
||||
wrappedHeader, err := blocks.WrappedExecutionPayloadHeaderCapella(executionPayloadHeaderCapella(), big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, beaconState.SetNextWithdrawalIndex(123))
|
||||
|
||||
@@ -54,6 +54,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
|
||||
@@ -227,6 +227,10 @@ func (s *Service) Start() {
}
s.ms.setClock(clock)

if s.store.isGenesisSync() {
log.Info("Exiting backfill service as the node has been initialized with a genesis state or the backfill status is missing")
return
}
status := s.store.status()
// Exit early if there aren't going to be any batches to backfill.
if primitives.Slot(status.LowSlot) <= s.ms.minimumSlot() {
@@ -293,8 +297,10 @@ func minimumBackfillSlot(current primitives.Slot) primitives.Slot {
oe = slots.MaxSafeEpoch()
}
offset := slots.UnsafeEpochStart(oe)
if offset > current {
return 0
if offset >= current {
// Slot 0 is the genesis block, therefore the signature in it is invalid.
// To prevent us from rejecting a batch, we restrict the minimum backfill slot to slot 1.
return 1
}
return current - offset
}
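Restating the rule implemented above as a tiny standalone function (the names and the slots-to-retain parameter are illustrative, not the actual package API): back off roughly MIN_EPOCHS_FOR_BLOCK_REQUESTS worth of slots from the current slot, but never go below slot 1, since slot 0 is the genesis block and carries no valid signature.

// minBackfillSlotSketch mirrors the logic of minimumBackfillSlot above:
// subtract slotsToRetain from the current slot, floored at slot 1.
func minBackfillSlotSketch(current, slotsToRetain uint64) uint64 {
	if slotsToRetain >= current {
		return 1 // slot 0 is genesis; its signature can never validate
	}
	return current - slotsToRetain
}
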
@@ -5,9 +5,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -75,6 +77,20 @@ func TestServiceInit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinimumBackfillSlot(t *testing.T) {
|
||||
oe := helpers.MinEpochsForBlockRequests()
|
||||
|
||||
currSlot := (oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
minSlot := minimumBackfillSlot(primitives.Slot(currSlot))
|
||||
require.Equal(t, 100*params.BeaconConfig().SlotsPerEpoch, minSlot)
|
||||
|
||||
oe = helpers.MinEpochsForBlockRequests()
|
||||
|
||||
currSlot = oe.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
minSlot = minimumBackfillSlot(primitives.Slot(currSlot))
|
||||
require.Equal(t, primitives.Slot(1), minSlot)
|
||||
}
|
||||
|
||||
func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []batch) []batch {
|
||||
for i := 0; i < n; i++ {
|
||||
select {
|
||||
|
||||
@@ -149,6 +149,12 @@ func (s *Store) swapStatus(bs *dbval.BackfillStatus) {
|
||||
s.bs = bs
|
||||
}
|
||||
|
||||
func (s *Store) isGenesisSync() bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.genesisSync
|
||||
}
|
||||
|
||||
// originState looks up the state for the checkpoint sync origin. This is a hack, because StatusUpdater is the only
|
||||
// thing that needs db access and it has the origin root handy, so it's convenient to look it up here. The state is
|
||||
// needed by the verifier.
|
||||
|
||||
@@ -40,11 +40,13 @@ func (w *p2pWorker) handle(ctx context.Context, b batch) batch {
|
||||
dlt := time.Now()
|
||||
backfillBatchTimeDownloading.Observe(float64(dlt.Sub(start).Milliseconds()))
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(b.logFields()).Debug("Batch requesting failed")
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
vb, err := w.v.verify(results)
|
||||
backfillBatchTimeVerifying.Observe(float64(time.Since(dlt).Milliseconds()))
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(b.logFields()).Debug("Batch validation failed")
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
// This is a hack to get the rough size of the batch. This helps us approximate the amount of memory needed
|
||||
|
||||
@@ -2,6 +2,7 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -80,7 +82,10 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
|
||||
sc, err := s.cfg.blobStorage.Get(root, idx)
|
||||
if err != nil {
|
||||
if db.IsNotFound(err) {
|
||||
log.WithError(err).Debugf("BlobSidecar not found in db, root=%x, index=%d", root, idx)
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": idx,
|
||||
}).Debugf("Peer requested blob sidecar by root not found in db")
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Errorf("unexpected db error retrieving BlobSidecar, root=%x, index=%d", root, idx)
|
||||
|
||||
@@ -210,7 +210,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
|
||||
"error": err,
|
||||
}).Debug("Invalid status message from peer")
|
||||
|
||||
respCode := byte(0)
|
||||
var respCode byte
|
||||
switch err {
|
||||
case p2ptypes.ErrGeneric:
|
||||
respCode = responseCodeServerError
|
||||
|
||||
@@ -257,7 +257,8 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, pubsub.ValidatorEx) {
return topic, func(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
defer messagehandler.HandlePanic(ctx, msg)
res = pubsub.ValidationIgnore // Default: ignore any message that panics.
// Default: ignore any message that panics.
res = pubsub.ValidationIgnore // nolint:wastedassign
ctx, cancel := context.WithTimeout(ctx, pubsubMessageTimeout)
defer cancel()
messageReceivedCounter.WithLabelValues(topic).Inc()
@@ -781,10 +782,8 @@ func isDigestValid(digest [4]byte, genesis time.Time, genValRoot [32]byte) (bool
}

func agentString(pid peer.ID, hst host.Host) string {
agString := ""
ok := false
rawVersion, storeErr := hst.Peerstore().Get(pid, "AgentVersion")
agString, ok = rawVersion.(string)
agString, ok := rawVersion.(string)
if storeErr != nil || !ok {
agString = ""
}
|
||||
@@ -227,8 +227,8 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
|
||||
logFields["validationTime"] = validationTime
|
||||
log.WithFields(logFields).Debug("Received block")
|
||||
|
||||
blockArrivalGossipSummary.Observe(float64(sinceSlotStartTime))
|
||||
blockVerificationGossipSummary.Observe(float64(validationTime))
|
||||
blockArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
|
||||
blockVerificationGossipSummary.Observe(float64(validationTime.Milliseconds()))
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
@@ -241,36 +241,11 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.ReadOn
|
||||
return err
|
||||
}
|
||||
|
||||
if !s.cfg.chain.InForkchoice(blk.Block().ParentRoot()) {
|
||||
s.setBadBlock(ctx, blockRoot)
|
||||
return blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
parentState, err := s.cfg.stateGen.StateByRoot(ctx, blk.Block().ParentRoot())
|
||||
parentState, err := s.validatePhase0Block(ctx, blk, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := blocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk); err != nil {
|
||||
s.setBadBlock(ctx, blockRoot)
|
||||
return err
|
||||
}
|
||||
// In the event the block is more than an epoch ahead from its
|
||||
// parent state, we have to advance the state forward.
|
||||
parentRoot := blk.Block().ParentRoot()
|
||||
parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], blk.Block().Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blk.Block().ProposerIndex() != idx {
|
||||
s.setBadBlock(ctx, blockRoot)
|
||||
return errors.New("incorrect proposer index")
|
||||
}
|
||||
|
||||
if err = s.validateBellatrixBeaconBlock(ctx, parentState, blk.Block()); err != nil {
|
||||
if errors.Is(err, ErrOptimisticParent) {
|
||||
return err
|
||||
@@ -282,6 +257,42 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.ReadOn
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validates beacon block according to phase 0 validity conditions.
|
||||
// - Checks that the parent is in our forkchoice tree.
|
||||
// - Validates that the proposer signature is valid.
|
||||
// - Validates that the proposer index is valid.
|
||||
func (s *Service) validatePhase0Block(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (state.BeaconState, error) {
|
||||
if !s.cfg.chain.InForkchoice(blk.Block().ParentRoot()) {
|
||||
s.setBadBlock(ctx, blockRoot)
|
||||
return nil, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
parentState, err := s.cfg.stateGen.StateByRoot(ctx, blk.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := blocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// In the event the block is more than an epoch ahead from its
|
||||
// parent state, we have to advance the state forward.
|
||||
parentRoot := blk.Block().ParentRoot()
|
||||
parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], blk.Block().Slot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blk.Block().ProposerIndex() != idx {
|
||||
s.setBadBlock(ctx, blockRoot)
|
||||
return nil, errors.New("incorrect proposer index")
|
||||
}
|
||||
return parentState, nil
|
||||
}
|
||||
|
||||
func validateDenebBeaconBlock(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
|
||||
@@ -22,6 +22,7 @@ go_library(
|
||||
"//cmd/beacon-chain/jwt:go_default_library",
|
||||
"//cmd/beacon-chain/storage:go_default_library",
|
||||
"//cmd/beacon-chain/sync/backfill:go_default_library",
|
||||
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
|
||||
"//cmd/beacon-chain/sync/checkpoint:go_default_library",
|
||||
"//cmd/beacon-chain/sync/genesis:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
|
||||
@@ -171,7 +171,7 @@ var (
|
||||
BlobBatchLimit = &cli.IntFlag{
|
||||
Name: "blob-batch-limit",
|
||||
Usage: "The amount of blobs the local peer is bounded to request and respond to in a batch.",
|
||||
Value: 8,
|
||||
Value: 64,
|
||||
}
|
||||
// BlobBatchLimitBurstFactor specifies the factor by which blob batch size may increase.
|
||||
BlobBatchLimitBurstFactor = &cli.IntFlag{
|
||||
|
||||
@@ -20,7 +20,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
jwtcommands "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/jwt"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/storage"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill"
|
||||
backfill "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill"
|
||||
bflags "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/checkpoint"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/genesis"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
@@ -139,9 +140,9 @@ var appFlags = []cli.Flag{
|
||||
flags.JwtId,
|
||||
storage.BlobStoragePathFlag,
|
||||
storage.BlobRetentionEpochFlag,
|
||||
backfill.EnableExperimentalBackfill,
|
||||
backfill.BackfillBatchSize,
|
||||
backfill.BackfillWorkerCount,
|
||||
bflags.EnableExperimentalBackfill,
|
||||
bflags.BackfillBatchSize,
|
||||
bflags.BackfillWorkerCount,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//beacon-chain/sync/backfill:go_default_library",
|
||||
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
cmd/beacon-chain/sync/backfill/flags/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["flags.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill/flags",
visibility = ["//visibility:public"],
deps = ["@com_github_urfave_cli_v2//:go_default_library"],
)

cmd/beacon-chain/sync/backfill/flags/flags.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package flags

import (
"github.com/urfave/cli/v2"
)

var (
backfillBatchSizeName = "backfill-batch-size"
backfillWorkerCountName = "backfill-worker-count"

// EnableExperimentalBackfill enables backfill for checkpoint synced nodes.
// This flag will be removed once backfill is enabled by default.
EnableExperimentalBackfill = &cli.BoolFlag{
Name: "enable-experimental-backfill",
Usage: "Backfill is still experimental at this time." +
"It will only be enabled if this flag is specified and the node was started using checkpoint sync.",
}
// BackfillBatchSize allows users to tune block backfill request sizes to maximize network utilization
// at the cost of higher memory.
BackfillBatchSize = &cli.Uint64Flag{
Name: backfillBatchSizeName,
Usage: "Number of blocks per backfill batch. " +
"A larger number will request more blocks at once from peers, but also consume more system memory to " +
"hold batches in memory during processing. This has a multiplicative effect with " + backfillWorkerCountName,
Value: 64,
}
// BackfillWorkerCount allows users to tune the number of concurrent backfill batches to download, to maximize
// network utilization at the cost of higher memory.
BackfillWorkerCount = &cli.IntFlag{
Name: backfillWorkerCountName,
Usage: "Number of concurrent backfill batch requests. " +
"A larger number will better utilize network resources, up to a system-dependent limit, but will also " +
"consume more system memory to hold batches in memory during processing. Multiply by backfill-batch-size and " +
"average block size (~2MB before deneb) to find the right number for your system. " +
"This has a multiplicative effect with " + backfillBatchSizeName,
Value: 2,
}
)
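The usage strings above suggest sizing memory as batch size x worker count x average block size. A back-of-the-envelope sketch of that arithmetic follows; the ~2 MB pre-Deneb block size is the estimate quoted in the flag text, and actual usage will vary.

package main

import "fmt"

func main() {
	const (
		batchSize     = 64      // --backfill-batch-size default above
		workerCount   = 2       // --backfill-worker-count default above
		avgBlockBytes = 2 << 20 // ~2 MB per block before Deneb (estimate from the flag text)
	)
	totalBytes := batchSize * workerCount * avgBlockBytes
	fmt.Printf("~%d MiB held for in-flight backfill batches\n", totalBytes>>20)
}
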
@@ -3,49 +3,18 @@ package backfill
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/node"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill/flags"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
backfillBatchSizeName = "backfill-batch-size"
|
||||
backfillWorkerCountName = "backfill-worker-count"
|
||||
// EnableExperimentalBackfill enables backfill for checkpoint synced nodes.
|
||||
// This flag will be removed onced backfill is enabled by default.
|
||||
EnableExperimentalBackfill = &cli.BoolFlag{
|
||||
Name: "enable-experimental-backfill",
|
||||
Usage: "Backfill is still experimental at this time." +
|
||||
"It will only be enabled if this flag is specified and the node was started using checkpoint sync.",
|
||||
}
|
||||
// BackfillBatchSize allows users to tune block backfill request sizes to maximize network utilization
|
||||
// at the cost of higher memory.
|
||||
BackfillBatchSize = &cli.Uint64Flag{
|
||||
Name: backfillBatchSizeName,
|
||||
Usage: "Number of blocks per backfill batch. " +
|
||||
"A larger number will request more blocks at once from peers, but also consume more system memory to " +
|
||||
"hold batches in memory during processing. This has a multiplicative effect with " + backfillWorkerCountName,
|
||||
Value: 64,
|
||||
}
|
||||
// BackfillWorkerCount allows users to tune the number of concurrent backfill batches to download, to maximize
|
||||
// network utilization at the cost of higher memory.
|
||||
BackfillWorkerCount = &cli.IntFlag{
|
||||
Name: backfillWorkerCountName,
|
||||
Usage: "Number of concurrent backfill batch requests. " +
|
||||
"A larger number will better utilize network resources, up to a system-dependent limit, but will also " +
|
||||
"consume more system memory to hold batches in memory during processing. Multiply by backfill-batch-size and " +
|
||||
"average block size (~2MB before deneb) to find the right number for your system. " +
|
||||
"This has a multiplicatice effect with " + backfillBatchSizeName,
|
||||
Value: 2,
|
||||
}
|
||||
)
|
||||
|
||||
// BeaconNodeOptions sets the appropriate functional opts on the *node.BeaconNode value, to decouple options
|
||||
// from flag parsing.
|
||||
func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
|
||||
opt := func(node *node.BeaconNode) (err error) {
|
||||
node.BackfillOpts = []backfill.ServiceOption{
|
||||
backfill.WithBatchSize(c.Uint64(BackfillBatchSize.Name)),
|
||||
backfill.WithWorkerCount(c.Int(BackfillWorkerCount.Name)),
|
||||
backfill.WithEnableBackfill(c.Bool(EnableExperimentalBackfill.Name)),
|
||||
backfill.WithBatchSize(c.Uint64(flags.BackfillBatchSize.Name)),
|
||||
backfill.WithWorkerCount(c.Int(flags.BackfillWorkerCount.Name)),
|
||||
backfill.WithEnableBackfill(c.Bool(flags.EnableExperimentalBackfill.Name)),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/storage"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill"
|
||||
backfill "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/checkpoint"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/genesis"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//cmd:go_default_library",
|
||||
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -223,9 +223,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(enableFullSSZDataLogging)
|
||||
cfg.EnableFullSSZDataLogging = true
|
||||
}
|
||||
if ctx.IsSet(enableVerboseSigVerification.Name) {
|
||||
logEnabled(enableVerboseSigVerification)
|
||||
cfg.EnableVerboseSigVerification = true
|
||||
cfg.EnableVerboseSigVerification = true
|
||||
if ctx.IsSet(disableVerboseSigVerification.Name) {
|
||||
logEnabled(disableVerboseSigVerification)
|
||||
cfg.EnableVerboseSigVerification = false
|
||||
}
|
||||
if ctx.IsSet(prepareAllPayloads.Name) {
|
||||
logEnabled(prepareAllPayloads)
|
||||
@@ -235,9 +236,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(disableResourceManager)
|
||||
cfg.DisableResourceManager = true
|
||||
}
|
||||
if ctx.IsSet(EnableEIP4881.Name) {
|
||||
logEnabled(EnableEIP4881)
|
||||
cfg.EnableEIP4881 = true
|
||||
cfg.EnableEIP4881 = true
|
||||
if ctx.IsSet(DisableEIP4881.Name) {
|
||||
logEnabled(DisableEIP4881)
|
||||
cfg.EnableEIP4881 = false
|
||||
}
|
||||
if ctx.IsSet(EnableLightClient.Name) {
|
||||
logEnabled(EnableLightClient)
|
||||
|
||||
@@ -68,6 +68,16 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableEIP4881 = &cli.BoolFlag{
|
||||
Name: "enable-eip-4881",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedVerboseSigVerification = &cli.BoolFlag{
|
||||
Name: "enable-verbose-sig-verification",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// Deprecated flags for both the beacon node and validator client.
|
||||
@@ -84,6 +94,8 @@ var deprecatedFlags = []cli.Flag{
|
||||
deprecatedDisableReorgLateBlocks,
|
||||
deprecatedDisableOptionalEngineMethods,
|
||||
deprecatedDisableAggregateParallel,
|
||||
deprecatedEnableEIP4881,
|
||||
deprecatedVerboseSigVerification,
|
||||
}
|
||||
|
||||
// deprecatedBeaconFlags contains flags that are still used by other components
|
||||
|
||||
@@ -3,6 +3,7 @@ package features
|
||||
import (
|
||||
"time"
|
||||
|
||||
backfill "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/backfill/flags"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@@ -128,17 +129,17 @@ var (
|
||||
Name: "enable-beacon-rest-api",
|
||||
Usage: "(Experimental): Enables of the beacon REST API when querying a beacon node.",
|
||||
}
|
||||
enableVerboseSigVerification = &cli.BoolFlag{
|
||||
Name: "enable-verbose-sig-verification",
|
||||
Usage: "Enables identifying invalid signatures if batch verification fails when processing block.",
|
||||
disableVerboseSigVerification = &cli.BoolFlag{
|
||||
Name: "disable-verbose-sig-verification",
|
||||
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
|
||||
}
|
||||
prepareAllPayloads = &cli.BoolFlag{
|
||||
Name: "prepare-all-payloads",
|
||||
Usage: "Informs the engine to prepare all local payloads. Useful for relayers and builders.",
|
||||
}
|
||||
EnableEIP4881 = &cli.BoolFlag{
|
||||
Name: "enable-eip-4881",
|
||||
Usage: "Enables the deposit tree specified in EIP-4881.",
|
||||
DisableEIP4881 = &cli.BoolFlag{
|
||||
Name: "disable-eip-4881",
|
||||
Usage: "Disables the deposit tree specified in EIP-4881.",
|
||||
}
|
||||
EnableLightClient = &cli.BoolFlag{
|
||||
Name: "enable-lightclient",
|
||||
@@ -158,9 +159,8 @@ var (
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
var devModeFlags = []cli.Flag{
|
||||
enableVerboseSigVerification,
|
||||
EnableEIP4881,
|
||||
enableExperimentalState,
|
||||
backfill.EnableExperimentalBackfill,
|
||||
}
|
||||
|
||||
// ValidatorFlags contains a list of all the feature flags that apply to the validator client.
|
||||
@@ -200,12 +200,12 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
|
||||
SaveFullExecutionPayloads,
|
||||
enableStartupOptimistic,
|
||||
enableFullSSZDataLogging,
|
||||
enableVerboseSigVerification,
|
||||
disableVerboseSigVerification,
|
||||
prepareAllPayloads,
|
||||
aggregateFirstInterval,
|
||||
aggregateSecondInterval,
|
||||
aggregateThirdInterval,
|
||||
EnableEIP4881,
|
||||
DisableEIP4881,
|
||||
disableResourceManager,
|
||||
DisableRegistrationCache,
|
||||
EnableLightClient,
|
||||
|
||||
@@ -187,6 +187,11 @@ func (executionPayload) PbDeneb() (*enginev1.ExecutionPayloadDeneb, error) {
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// ValueInWei --
|
||||
func (executionPayload) ValueInWei() (math.Wei, error) {
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// ValueInGwei --
|
||||
func (executionPayload) ValueInGwei() (uint64, error) {
|
||||
return 0, consensus_types.ErrUnsupportedField
|
||||
@@ -363,6 +368,11 @@ func (executionPayloadHeader) PbBellatrix() (*enginev1.ExecutionPayload, error)
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// ValueInWei --
|
||||
func (executionPayloadHeader) ValueInWei() (math.Wei, error) {
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// ValueInGwei --
|
||||
func (executionPayloadHeader) ValueInGwei() (uint64, error) {
|
||||
return 0, consensus_types.ErrUnsupportedField
|
||||
@@ -400,13 +410,14 @@ func PayloadToHeader(payload interfaces.ExecutionData) (*enginev1.ExecutionPaylo
|
||||
// This wrapper allows us to conform to a common interface so that beacon
|
||||
// blocks for future forks can also be applied across Prysm without issues.
|
||||
type executionPayloadCapella struct {
|
||||
p *enginev1.ExecutionPayloadCapella
|
||||
value uint64
|
||||
p *enginev1.ExecutionPayloadCapella
|
||||
weiValue math.Wei
|
||||
gweiValue uint64
|
||||
}
|
||||
|
||||
// WrappedExecutionPayloadCapella is a constructor which wraps a protobuf execution payload into an interface.
|
||||
func WrappedExecutionPayloadCapella(p *enginev1.ExecutionPayloadCapella, value math.Gwei) (interfaces.ExecutionData, error) {
|
||||
w := executionPayloadCapella{p: p, value: uint64(value)}
|
||||
func WrappedExecutionPayloadCapella(p *enginev1.ExecutionPayloadCapella, value math.Wei) (interfaces.ExecutionData, error) {
|
||||
w := executionPayloadCapella{p: p, weiValue: value, gweiValue: uint64(math.WeiToGwei(value))}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
@@ -568,22 +579,28 @@ func (executionPayloadCapella) PbBellatrix() (*enginev1.ExecutionPayload, error)
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// ValueInWei --
|
||||
func (e executionPayloadCapella) ValueInWei() (math.Wei, error) {
|
||||
return e.weiValue, nil
|
||||
}
|
||||
|
||||
// ValueInGwei --
|
||||
func (e executionPayloadCapella) ValueInGwei() (uint64, error) {
|
||||
return e.value, nil
|
||||
return e.gweiValue, nil
|
||||
}
|
||||
|
||||
// executionPayloadHeaderCapella is a convenience wrapper around a blinded beacon block body's execution header data structure
|
||||
// This wrapper allows us to conform to a common interface so that beacon
|
||||
// blocks for future forks can also be applied across Prysm without issues.
|
||||
type executionPayloadHeaderCapella struct {
|
||||
p *enginev1.ExecutionPayloadHeaderCapella
|
||||
value uint64
|
||||
p *enginev1.ExecutionPayloadHeaderCapella
|
||||
weiValue math.Wei
|
||||
gweiValue uint64
|
||||
}
|
||||
|
||||
// WrappedExecutionPayloadHeaderCapella is a constructor which wraps a protobuf execution header into an interface.
|
||||
func WrappedExecutionPayloadHeaderCapella(p *enginev1.ExecutionPayloadHeaderCapella, value math.Gwei) (interfaces.ExecutionData, error) {
|
||||
w := executionPayloadHeaderCapella{p: p, value: uint64(value)}
|
||||
func WrappedExecutionPayloadHeaderCapella(p *enginev1.ExecutionPayloadHeaderCapella, value math.Wei) (interfaces.ExecutionData, error) {
|
||||
w := executionPayloadHeaderCapella{p: p, weiValue: value, gweiValue: uint64(math.WeiToGwei(value))}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
@@ -745,9 +762,14 @@ func (executionPayloadHeaderCapella) PbBellatrix() (*enginev1.ExecutionPayload,
return nil, consensus_types.ErrUnsupportedField
}

// ValueInWei --
func (e executionPayloadHeaderCapella) ValueInWei() (math.Wei, error) {
return e.weiValue, nil
}

// ValueInGwei --
func (e executionPayloadHeaderCapella) ValueInGwei() (uint64, error) {
return e.value, nil
return e.gweiValue, nil
}

// PayloadToHeaderCapella converts `payload` into execution payload header format.
@@ -839,6 +861,9 @@ func PayloadToHeaderDeneb(payload interfaces.ExecutionData) (*enginev1.Execution
// IsEmptyExecutionData checks if an execution data is empty underneath. If a single field has
// a non-zero value, this function will return false.
func IsEmptyExecutionData(data interfaces.ExecutionData) (bool, error) {
if data == nil {
return true, nil
}
if !bytes.Equal(data.ParentHash(), make([]byte, fieldparams.RootLength)) {
return false, nil
}
@@ -897,13 +922,14 @@ func IsEmptyExecutionData(data interfaces.ExecutionData) (bool, error) {
// This wrapper allows us to conform to a common interface so that beacon
// blocks for future forks can also be applied across Prysm without issues.
type executionPayloadHeaderDeneb struct {
p *enginev1.ExecutionPayloadHeaderDeneb
value uint64
p *enginev1.ExecutionPayloadHeaderDeneb
weiValue math.Wei
gweiValue uint64
}

// WrappedExecutionPayloadHeaderDeneb is a constructor which wraps a protobuf execution header into an interface.
func WrappedExecutionPayloadHeaderDeneb(p *enginev1.ExecutionPayloadHeaderDeneb, value math.Gwei) (interfaces.ExecutionData, error) {
w := executionPayloadHeaderDeneb{p: p, value: uint64(value)}
func WrappedExecutionPayloadHeaderDeneb(p *enginev1.ExecutionPayloadHeaderDeneb, value math.Wei) (interfaces.ExecutionData, error) {
w := executionPayloadHeaderDeneb{p: p, weiValue: value, gweiValue: uint64(math.WeiToGwei(value))}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
@@ -1030,17 +1056,17 @@ func (e executionPayloadHeaderDeneb) Withdrawals() ([]*enginev1.Withdrawal, erro
return nil, consensus_types.ErrUnsupportedField
}

// WitdrawalsRoot --
// WithdrawalsRoot --
func (e executionPayloadHeaderDeneb) WithdrawalsRoot() ([]byte, error) {
return e.p.WithdrawalsRoot, nil
}

// BlobGasUsed
// BlobGasUsed --
func (e executionPayloadHeaderDeneb) BlobGasUsed() (uint64, error) {
return e.p.BlobGasUsed, nil
}

// ExcessBlobGas
// ExcessBlobGas --
func (e executionPayloadHeaderDeneb) ExcessBlobGas() (uint64, error) {
return e.p.ExcessBlobGas, nil
}
@@ -1060,8 +1086,14 @@ func (executionPayloadHeaderDeneb) PbCapella() (*enginev1.ExecutionPayloadCapell
return nil, consensus_types.ErrUnsupportedField
}

// ValueInWei --
func (e executionPayloadHeaderDeneb) ValueInWei() (math.Wei, error) {
return e.weiValue, nil
}

// ValueInGwei --
func (e executionPayloadHeaderDeneb) ValueInGwei() (uint64, error) {
return e.value, nil
return e.gweiValue, nil
}

// IsBlinded returns true if the underlying data is blinded.
@@ -1073,13 +1105,14 @@ func (e executionPayloadHeaderDeneb) IsBlinded() bool {
// This wrapper allows us to conform to a common interface so that beacon
// blocks for future forks can also be applied across Prysm without issues.
type executionPayloadDeneb struct {
p *enginev1.ExecutionPayloadDeneb
value uint64
p *enginev1.ExecutionPayloadDeneb
weiValue math.Wei
gweiValue uint64
}

// WrappedExecutionPayloadDeneb is a constructor which wraps a protobuf execution payload into an interface.
func WrappedExecutionPayloadDeneb(p *enginev1.ExecutionPayloadDeneb, value math.Gwei) (interfaces.ExecutionData, error) {
w := executionPayloadDeneb{p: p, value: uint64(value)}
func WrappedExecutionPayloadDeneb(p *enginev1.ExecutionPayloadDeneb, value math.Wei) (interfaces.ExecutionData, error) {
w := executionPayloadDeneb{p: p, weiValue: value, gweiValue: uint64(math.WeiToGwei(value))}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
@@ -1234,8 +1267,14 @@ func (e executionPayloadDeneb) PbDeneb() (*enginev1.ExecutionPayloadDeneb, error
return e.p, nil
}

// ValueInWei --
func (e executionPayloadDeneb) ValueInWei() (math.Wei, error) {
return e.weiValue, nil
}

// ValueInGwei --
func (e executionPayloadDeneb) ValueInGwei() (uint64, error) {
return e.value, nil
return e.gweiValue, nil
}

// IsBlinded returns true if the underlying data is blinded.
@@ -1243,6 +1282,12 @@ func (e executionPayloadDeneb) IsBlinded() bool {
return false
}

// PayloadValueToWei returns a Wei value given the payload's value
func PayloadValueToWei(value []byte) math.Wei {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(value))
}

// PayloadValueToGwei returns a Gwei value given the payload's value
func PayloadValueToGwei(value []byte) math.Gwei {
// We have to convert big endian to little endian because the value is coming from the execution layer.
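The two helpers above are the bridge between the execution layer's little-endian uint256 value and the Wei/Gwei types used throughout this change. Below is a minimal, self-contained sketch of that conversion using only the standard library; the `reverse` helper and the 1e9 Wei-per-Gwei divisor stand in for Prysm's `bytesutil.ReverseByteOrder` and `math.WeiToGwei`, so treat it as an illustration under those assumptions rather than the exact implementation:

```go
package main

import (
	"fmt"
	"math/big"
)

// reverse returns a copy of b with its byte order flipped (little-endian to big-endian).
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 10 Gwei = 10_000_000_000 Wei = 0x02540BE400, delivered as little-endian bytes by the execution layer.
	littleEndian := []byte{0x00, 0xe4, 0x0b, 0x54, 0x02}

	// Equivalent of PayloadValueToWei: flip to big-endian, then parse into a big.Int.
	wei := new(big.Int).SetBytes(reverse(littleEndian))

	// Wei to Gwei is an integer division by 1e9, which is what the Gwei-denominated accessors report.
	gwei := new(big.Int).Div(wei, big.NewInt(1_000_000_000))

	fmt.Println(wei, gwei) // prints: 10000000000 10
}
```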
@@ -1,6 +1,7 @@
package blocks_test

import (
"math/big"
"testing"

fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -105,11 +106,14 @@ func TestWrapExecutionPayloadCapella(t *testing.T) {
Amount: 77,
}},
}
payload, err := blocks.WrappedExecutionPayloadCapella(data, 10)
payload, err := blocks.WrappedExecutionPayloadCapella(data, big.NewInt(10*1e9))
require.NoError(t, err)
v, err := payload.ValueInGwei()
wei, err := payload.ValueInWei()
require.NoError(t, err)
assert.Equal(t, uint64(10), v)
assert.Equal(t, 0, big.NewInt(10*1e9).Cmp(wei))
gwei, err := payload.ValueInGwei()
require.NoError(t, err)
assert.Equal(t, uint64(10), gwei)

assert.DeepEqual(t, data, payload.Proto())
}
@@ -132,12 +136,15 @@ func TestWrapExecutionPayloadHeaderCapella(t *testing.T) {
TransactionsRoot: []byte("transactionsroot"),
WithdrawalsRoot: []byte("withdrawalsroot"),
}
payload, err := blocks.WrappedExecutionPayloadHeaderCapella(data, 10)
payload, err := blocks.WrappedExecutionPayloadHeaderCapella(data, big.NewInt(10*1e9))
require.NoError(t, err)

v, err := payload.ValueInGwei()
wei, err := payload.ValueInWei()
require.NoError(t, err)
assert.Equal(t, uint64(10), v)
assert.Equal(t, 0, big.NewInt(10*1e9).Cmp(wei))
gwei, err := payload.ValueInGwei()
require.NoError(t, err)
assert.Equal(t, uint64(10), gwei)

assert.DeepEqual(t, data, payload.Proto())

@@ -151,22 +158,22 @@ func TestWrapExecutionPayloadHeaderCapella(t *testing.T) {
}

func TestWrapExecutionPayloadCapella_IsNil(t *testing.T) {
_, err := blocks.WrappedExecutionPayloadCapella(nil, 0)
_, err := blocks.WrappedExecutionPayloadCapella(nil, big.NewInt(0))
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)

data := &enginev1.ExecutionPayloadCapella{GasUsed: 54}
payload, err := blocks.WrappedExecutionPayloadCapella(data, 0)
payload, err := blocks.WrappedExecutionPayloadCapella(data, big.NewInt(0))
require.NoError(t, err)

assert.Equal(t, false, payload.IsNil())
}

func TestWrapExecutionPayloadHeaderCapella_IsNil(t *testing.T) {
_, err := blocks.WrappedExecutionPayloadHeaderCapella(nil, 0)
_, err := blocks.WrappedExecutionPayloadHeaderCapella(nil, big.NewInt(0))
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)

data := &enginev1.ExecutionPayloadHeaderCapella{GasUsed: 54}
payload, err := blocks.WrappedExecutionPayloadHeaderCapella(data, 0)
payload, err := blocks.WrappedExecutionPayloadHeaderCapella(data, big.NewInt(0))
require.NoError(t, err)

assert.Equal(t, false, payload.IsNil())
@@ -267,11 +274,14 @@ func TestWrapExecutionPayloadDeneb(t *testing.T) {
BlobGasUsed: 88,
ExcessBlobGas: 99,
}
payload, err := blocks.WrappedExecutionPayloadDeneb(data, 420)
payload, err := blocks.WrappedExecutionPayloadDeneb(data, big.NewInt(420*1e9))
require.NoError(t, err)
v, err := payload.ValueInGwei()
wei, err := payload.ValueInWei()
require.NoError(t, err)
assert.Equal(t, uint64(420), v)
assert.Equal(t, 0, big.NewInt(420*1e9).Cmp(wei))
gwei, err := payload.ValueInGwei()
require.NoError(t, err)
assert.Equal(t, uint64(420), gwei)

g, err := payload.BlobGasUsed()
require.NoError(t, err)
@@ -302,12 +312,15 @@ func TestWrapExecutionPayloadHeaderDeneb(t *testing.T) {
BlobGasUsed: 88,
ExcessBlobGas: 99,
}
payload, err := blocks.WrappedExecutionPayloadHeaderDeneb(data, 10)
payload, err := blocks.WrappedExecutionPayloadHeaderDeneb(data, big.NewInt(10*1e9))
require.NoError(t, err)

v, err := payload.ValueInGwei()
wei, err := payload.ValueInWei()
require.NoError(t, err)
assert.Equal(t, uint64(10), v)
assert.Equal(t, 0, big.NewInt(10*1e9).Cmp(wei))
gwei, err := payload.ValueInGwei()
require.NoError(t, err)
assert.Equal(t, uint64(10), gwei)

g, err := payload.BlobGasUsed()
require.NoError(t, err)
@@ -409,7 +422,7 @@ func createWrappedPayloadCapella(t testing.TB) interfaces.ExecutionData {
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return payload
}
@@ -431,7 +444,7 @@ func createWrappedPayloadHeaderCapella(t testing.TB) interfaces.ExecutionData {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return payload
}
@@ -455,7 +468,7 @@ func createWrappedPayloadDeneb(t testing.TB) interfaces.ExecutionData {
Withdrawals: make([]*enginev1.Withdrawal, 0),
BlobGasUsed: 0,
ExcessBlobGas: 0,
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return payload
}
@@ -479,7 +492,7 @@ func createWrappedPayloadHeaderDeneb(t testing.TB) interfaces.ExecutionData {
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
BlobGasUsed: 0,
ExcessBlobGas: 0,
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return payload
}
@@ -2,6 +2,7 @@ package blocks

import (
"fmt"
"math/big"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -228,9 +229,9 @@ func BuildSignedBeaconBlockFromExecutionPayload(
case *enginev1.ExecutionPayload:
wrappedPayload, wrapErr = WrappedExecutionPayload(p)
case *enginev1.ExecutionPayloadCapella:
wrappedPayload, wrapErr = WrappedExecutionPayloadCapella(p, 0)
wrappedPayload, wrapErr = WrappedExecutionPayloadCapella(p, big.NewInt(0))
case *enginev1.ExecutionPayloadDeneb:
wrappedPayload, wrapErr = WrappedExecutionPayloadDeneb(p, 0)
wrappedPayload, wrapErr = WrappedExecutionPayloadDeneb(p, big.NewInt(0))
default:
return nil, fmt.Errorf("%T is not a type of execution payload", p)
}
@@ -3,6 +3,7 @@ package blocks
import (
"bytes"
"errors"
"math/big"
"testing"

fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -515,7 +516,7 @@ func TestBuildSignedBeaconBlockFromExecutionPayload(t *testing.T) {
ExcessBlobGas: 123,
BlobGasUsed: 321,
}
wrapped, err := WrappedExecutionPayloadDeneb(payload, 123)
wrapped, err := WrappedExecutionPayloadDeneb(payload, big.NewInt(123))
require.NoError(t, err)
header, err := PayloadToHeaderDeneb(wrapped)
require.NoError(t, err)
@@ -2,6 +2,7 @@ package blocks

import (
"fmt"
"math/big"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
@@ -9,6 +10,7 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/math"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
validatorpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/validator-client"
@@ -371,7 +373,26 @@ func (b *SignedBeaconBlock) IsBlinded() bool {
return b.version >= version.Bellatrix && b.block.body.executionPayload == nil
}

// ValueInGwei metadata on the payload value returned by the builder. Value is 0 by default if local.
// ValueInWei metadata on the payload value returned by the builder.
func (b *SignedBeaconBlock) ValueInWei() math.Wei {
exec, err := b.block.body.Execution()
if err != nil {
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
log.WithError(err).Warn("failed to retrieve execution payload")
}
return big.NewInt(0)
}
val, err := exec.ValueInWei()
if err != nil {
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
log.WithError(err).Warn("failed to retrieve execution payload")
}
return big.NewInt(0)
}
return val
}

// ValueInGwei metadata on the payload value returned by the builder.
func (b *SignedBeaconBlock) ValueInGwei() uint64 {
exec, err := b.block.body.Execution()
if err != nil {
@@ -1038,71 +1059,11 @@ func (b *BeaconBlockBody) Execution() (interfaces.ExecutionData, error) {
switch b.version {
case version.Phase0, version.Altair:
return nil, consensus_types.ErrNotSupported("Execution", b.version)
case version.Bellatrix:
if b.IsBlinded() {
var ph *enginev1.ExecutionPayloadHeader
var ok bool
if b.executionPayloadHeader != nil {
ph, ok = b.executionPayloadHeader.Proto().(*enginev1.ExecutionPayloadHeader)
if !ok {
return nil, errPayloadHeaderWrongType
}
}
return WrappedExecutionPayloadHeader(ph)
}
var p *enginev1.ExecutionPayload
var ok bool
if b.executionPayload != nil {
p, ok = b.executionPayload.Proto().(*enginev1.ExecutionPayload)
if !ok {
return nil, errPayloadWrongType
}
}
return WrappedExecutionPayload(p)
case version.Capella:
if b.IsBlinded() {
var ph *enginev1.ExecutionPayloadHeaderCapella
var ok bool
if b.executionPayloadHeader != nil {
ph, ok = b.executionPayloadHeader.Proto().(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, errPayloadHeaderWrongType
}
return WrappedExecutionPayloadHeaderCapella(ph, 0)
}
}
var p *enginev1.ExecutionPayloadCapella
var ok bool
if b.executionPayload != nil {
p, ok = b.executionPayload.Proto().(*enginev1.ExecutionPayloadCapella)
if !ok {
return nil, errPayloadWrongType
}
}
return WrappedExecutionPayloadCapella(p, 0)
case version.Deneb:
if b.IsBlinded() {
var ph *enginev1.ExecutionPayloadHeaderDeneb
var ok bool
if b.executionPayloadHeader != nil {
ph, ok = b.executionPayloadHeader.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderWrongType
}
return WrappedExecutionPayloadHeaderDeneb(ph, 0)
}
}
var p *enginev1.ExecutionPayloadDeneb
var ok bool
if b.executionPayload != nil {
p, ok = b.executionPayload.Proto().(*enginev1.ExecutionPayloadDeneb)
if !ok {
return nil, errPayloadWrongType
}
}
return WrappedExecutionPayloadDeneb(p, 0)
default:
return nil, errIncorrectBlockVersion
if b.IsBlinded() {
return b.executionPayloadHeader, nil
}
return b.executionPayload, nil
}
}
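`ValueInWei` above deliberately degrades to `big.NewInt(0)` when the block predates Bellatrix or carries no payload value, for example a locally built block. A short caller-side sketch under that assumption; `hasBuilderBid` is a hypothetical helper, and the `consensus-types/blocks` import path is assumed from the package layout used elsewhere in this diff:

```go
package blockutil

import (
	"math/big"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)

// hasBuilderBid reports whether a signed block carries a non-zero builder payload value.
// A zero Wei value covers both locally built and pre-Bellatrix blocks, since
// SignedBeaconBlock.ValueInWei falls back to big.NewInt(0) in those cases.
func hasBuilderBid(b *blocks.SignedBeaconBlock) bool {
	return big.NewInt(0).Cmp(b.ValueInWei()) < 0
}
```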
@@ -1,6 +1,7 @@
package blocks

import (
"math/big"
"testing"

ssz "github.com/prysmaticlabs/fastssz"
@@ -214,9 +215,9 @@ func Test_BeaconBlock_Copy(t *testing.T) {

payload := &pb.ExecutionPayloadDeneb{ExcessBlobGas: 123}
header := &pb.ExecutionPayloadHeaderDeneb{ExcessBlobGas: 223}
payloadInterface, err := WrappedExecutionPayloadDeneb(payload, 123)
payloadInterface, err := WrappedExecutionPayloadDeneb(payload, big.NewInt(123))
require.NoError(t, err)
headerInterface, err := WrappedExecutionPayloadHeaderDeneb(header, 123)
headerInterface, err := WrappedExecutionPayloadHeaderDeneb(header, big.NewInt(123))
require.NoError(t, err)
bb = &BeaconBlockBody{executionPayload: payloadInterface, executionPayloadHeader: headerInterface, randaoReveal: bytesutil.ToBytes96([]byte{246}), graffiti: bytesutil.ToBytes32([]byte("graffiti"))}
b = &BeaconBlock{body: bb, slot: 123, proposerIndex: 456, parentRoot: bytesutil.ToBytes32([]byte("parentroot")), stateRoot: bytesutil.ToBytes32([]byte("stateroot"))}
@@ -411,7 +412,7 @@ func Test_BeaconBlockBody_Execution(t *testing.T) {
assert.DeepEqual(t, result, e)

executionCapella := &pb.ExecutionPayloadCapella{BlockNumber: 1}
eCapella, err := WrappedExecutionPayloadCapella(executionCapella, 0)
eCapella, err := WrappedExecutionPayloadCapella(executionCapella, big.NewInt(0))
require.NoError(t, err)
bb = &SignedBeaconBlock{version: version.Capella, block: &BeaconBlock{body: &BeaconBlockBody{version: version.Capella}}}
require.NoError(t, bb.SetExecution(eCapella))
@@ -420,7 +421,7 @@ func Test_BeaconBlockBody_Execution(t *testing.T) {
assert.DeepEqual(t, result, eCapella)

executionCapellaHeader := &pb.ExecutionPayloadHeaderCapella{BlockNumber: 1}
eCapellaHeader, err := WrappedExecutionPayloadHeaderCapella(executionCapellaHeader, 0)
eCapellaHeader, err := WrappedExecutionPayloadHeaderCapella(executionCapellaHeader, big.NewInt(0))
require.NoError(t, err)
bb = &SignedBeaconBlock{version: version.Capella, block: &BeaconBlock{version: version.Capella, body: &BeaconBlockBody{version: version.Capella}}}
require.NoError(t, bb.SetExecution(eCapellaHeader))
@@ -429,7 +430,7 @@ func Test_BeaconBlockBody_Execution(t *testing.T) {
assert.DeepEqual(t, result, eCapellaHeader)

executionDeneb := &pb.ExecutionPayloadDeneb{BlockNumber: 1, ExcessBlobGas: 123}
eDeneb, err := WrappedExecutionPayloadDeneb(executionDeneb, 0)
eDeneb, err := WrappedExecutionPayloadDeneb(executionDeneb, big.NewInt(0))
require.NoError(t, err)
bb = &SignedBeaconBlock{version: version.Deneb, block: &BeaconBlock{body: &BeaconBlockBody{version: version.Deneb}}}
require.NoError(t, bb.SetExecution(eDeneb))
@@ -441,7 +442,7 @@ func Test_BeaconBlockBody_Execution(t *testing.T) {
require.DeepEqual(t, gas, uint64(123))

executionDenebHeader := &pb.ExecutionPayloadHeaderDeneb{BlockNumber: 1, ExcessBlobGas: 223}
eDenebHeader, err := WrappedExecutionPayloadHeaderDeneb(executionDenebHeader, 0)
eDenebHeader, err := WrappedExecutionPayloadHeaderDeneb(executionDenebHeader, big.NewInt(0))
require.NoError(t, err)
bb = &SignedBeaconBlock{version: version.Deneb, block: &BeaconBlock{version: version.Deneb, body: &BeaconBlockBody{version: version.Deneb}}}
require.NoError(t, bb.SetExecution(eDenebHeader))
@@ -1,6 +1,8 @@
package blocks

import (
"math/big"

"github.com/pkg/errors"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -844,7 +846,7 @@ func initBlockBodyFromProtoCapella(pb *eth.BeaconBlockBodyCapella) (*BeaconBlock
return nil, errNilBlockBody
}

p, err := WrappedExecutionPayloadCapella(pb.ExecutionPayload, 0)
p, err := WrappedExecutionPayloadCapella(pb.ExecutionPayload, big.NewInt(0))
// We allow the payload to be nil
if err != nil && err != consensus_types.ErrNilObjectWrapped {
return nil, err
@@ -871,7 +873,7 @@ func initBlindedBlockBodyFromProtoCapella(pb *eth.BlindedBeaconBlockBodyCapella)
return nil, errNilBlockBody
}

ph, err := WrappedExecutionPayloadHeaderCapella(pb.ExecutionPayloadHeader, 0)
ph, err := WrappedExecutionPayloadHeaderCapella(pb.ExecutionPayloadHeader, big.NewInt(0))
// We allow the payload to be nil
if err != nil && err != consensus_types.ErrNilObjectWrapped {
return nil, err
@@ -898,7 +900,7 @@ func initBlockBodyFromProtoDeneb(pb *eth.BeaconBlockBodyDeneb) (*BeaconBlockBody
return nil, errNilBlockBody
}

p, err := WrappedExecutionPayloadDeneb(pb.ExecutionPayload, 0)
p, err := WrappedExecutionPayloadDeneb(pb.ExecutionPayload, big.NewInt(0))
// We allow the payload to be nil
if err != nil && err != consensus_types.ErrNilObjectWrapped {
return nil, err
@@ -926,7 +928,7 @@ func initBlindedBlockBodyFromProtoDeneb(pb *eth.BlindedBeaconBlockBodyDeneb) (*B
return nil, errNilBlockBody
}

ph, err := WrappedExecutionPayloadHeaderDeneb(pb.ExecutionPayloadHeader, 0)
ph, err := WrappedExecutionPayloadHeaderDeneb(pb.ExecutionPayloadHeader, big.NewInt(0))
// We allow the payload to be nil
if err != nil && err != consensus_types.ErrNilObjectWrapped {
return nil, err
@@ -1,6 +1,7 @@
package blocks

import (
"math/big"
"testing"

"github.com/prysmaticlabs/go-bitfield"
@@ -1331,7 +1332,7 @@ func bodyBlindedBellatrix(t *testing.T) *BeaconBlockBody {

func bodyCapella(t *testing.T) *BeaconBlockBody {
f := getFields()
p, err := WrappedExecutionPayloadCapella(f.execPayloadCapella, 0)
p, err := WrappedExecutionPayloadCapella(f.execPayloadCapella, big.NewInt(0))
require.NoError(t, err)
return &BeaconBlockBody{
version: version.Capella,
@@ -1355,7 +1356,7 @@ func bodyCapella(t *testing.T) *BeaconBlockBody {

func bodyBlindedCapella(t *testing.T) *BeaconBlockBody {
f := getFields()
ph, err := WrappedExecutionPayloadHeaderCapella(f.execPayloadHeaderCapella, 0)
ph, err := WrappedExecutionPayloadHeaderCapella(f.execPayloadHeaderCapella, big.NewInt(0))
require.NoError(t, err)
return &BeaconBlockBody{
version: version.Capella,
@@ -1379,7 +1380,7 @@ func bodyBlindedCapella(t *testing.T) *BeaconBlockBody {

func bodyDeneb(t *testing.T) *BeaconBlockBody {
f := getFields()
p, err := WrappedExecutionPayloadDeneb(f.execPayloadDeneb, 0)
p, err := WrappedExecutionPayloadDeneb(f.execPayloadDeneb, big.NewInt(0))
require.NoError(t, err)
return &BeaconBlockBody{
version: version.Deneb,
@@ -1404,7 +1405,7 @@ func bodyDeneb(t *testing.T) *BeaconBlockBody {

func bodyBlindedDeneb(t *testing.T) *BeaconBlockBody {
f := getFields()
ph, err := WrappedExecutionPayloadHeaderDeneb(f.execPayloadHeaderDeneb, 0)
ph, err := WrappedExecutionPayloadHeaderDeneb(f.execPayloadHeaderDeneb, big.NewInt(0))
require.NoError(t, err)
return &BeaconBlockBody{
version: version.Deneb,
@@ -11,6 +11,7 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
@@ -4,6 +4,7 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/math"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
validatorpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/validator-client"
@@ -32,6 +33,7 @@ type ReadOnlySignedBeaconBlock interface {
ssz.Unmarshaler
Version() int
IsBlinded() bool
ValueInWei() math.Wei
ValueInGwei() uint64
Header() (*ethpb.SignedBeaconBlockHeader, error)
}
@@ -130,5 +132,6 @@ type ExecutionData interface {
PbCapella() (*enginev1.ExecutionPayloadCapella, error)
PbBellatrix() (*enginev1.ExecutionPayload, error)
PbDeneb() (*enginev1.ExecutionPayloadDeneb, error)
ValueInWei() (math.Wei, error)
ValueInGwei() (uint64, error)
}
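With both `ValueInWei` and `ValueInGwei` now on `ReadOnlySignedBeaconBlock` and `ExecutionData`, callers can compare payload values without first truncating to Gwei. A minimal sketch, assuming the `consensus-types/interfaces` import path shown above; `pickHigherValueBlock` is a hypothetical helper rather than anything added by this change:

```go
package blockutil

import (
	"math/big"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
)

// pickHigherValueBlock compares two blocks by their full Wei value with big.Int.Cmp,
// avoiding the rounding a uint64 Gwei comparison would introduce.
func pickHigherValueBlock(a, b interfaces.ReadOnlySignedBeaconBlock) interfaces.ReadOnlySignedBeaconBlock {
	aValue := new(big.Int).Set(a.ValueInWei())
	if aValue.Cmp(b.ValueInWei()) >= 0 {
		return a
	}
	return b
}
```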
@@ -9,6 +9,7 @@ go_library(
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
@@ -5,6 +5,7 @@ import (
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/math"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
validatorpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/validator-client"
"google.golang.org/protobuf/proto"
@@ -106,6 +107,10 @@ func (SignedBeaconBlock) Header() (*eth.SignedBeaconBlockHeader, error) {
panic("implement me")
}

func (SignedBeaconBlock) ValueInWei() math.Wei {
panic("implement me")
}

func (SignedBeaconBlock) ValueInGwei() uint64 {
panic("implement me")
}
@@ -94,7 +94,6 @@ func (l *List[T]) Remove(n *Node[T]) {
n.next.prev = n.prev
}
}
n = nil
l.len--
}
deps.bzl (4 lines changed)
@@ -3561,8 +3561,8 @@ def prysm_deps():
go_repository(
name = "com_github_prysmaticlabs_gohashtree",
importpath = "github.com/prysmaticlabs/gohashtree",
sum = "h1:1EVinCWdb3Lorq7xn8DYQHf48nCcdAM3Vb18KsFlRWY=",
version = "v0.0.3-alpha",
sum = "h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=",
version = "v0.0.4-beta",
)

go_repository(
go.mod (2 lines changed)
@@ -257,7 +257,7 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/validator/v10 v10.13.0
github.com/peterh/liner v1.2.0 // indirect
github.com/prysmaticlabs/gohashtree v0.0.3-alpha
github.com/prysmaticlabs/gohashtree v0.0.4-beta
golang.org/x/sys v0.15.0 // indirect
google.golang.org/api v0.44.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
go.sum (4 lines changed)
@@ -1105,8 +1105,8 @@ github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/gohashtree v0.0.3-alpha h1:1EVinCWdb3Lorq7xn8DYQHf48nCcdAM3Vb18KsFlRWY=
github.com/prysmaticlabs/gohashtree v0.0.3-alpha/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs=
github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446 h1:4wctORg/1TkgLgXejv9yOSAm3cDBJxoTzl/RNuZmX28=
github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446/go.mod h1:IOyTYjcIO0rkmnGBfJTL0NJ11exy/Tc2QEuv7hCXp24=
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c h1:9PHRCuO/VN0s9k+RmLykho7AjDxblNYI5bYKed16NPU=
proto/prysm/v1alpha1/beacon_block.pb.go (generated, 1802 lines changed; diff not shown because it is too large)

@@ -53,7 +53,7 @@ message GenericSignedBeaconBlock {
SignedBlindedBeaconBlockDeneb blinded_deneb = 8;
}
bool is_blinded = 100;
uint64 payload_value = 101;
uint64 payload_value = 101 [deprecated = true];
}

message GenericBeaconBlock {
@@ -82,7 +82,7 @@ message GenericBeaconBlock {
BlindedBeaconBlockDeneb blinded_deneb = 8;
}
bool is_blinded = 100;
uint64 payload_value = 101;
string payload_value = 101;
}

// The Ethereum consensus beacon block. The message does not contain a validator signature.
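The `payload_value` change above is the wire-format side of the same migration: the old `uint64` field, which could only carry a Gwei-scale value, is deprecated on `GenericSignedBeaconBlock` and becomes a base-10 `string` on `GenericBeaconBlock`, so a full Wei amount (which can exceed the uint64 range) survives the round trip. A hedged sketch of filling and reading the new field, assuming the standard protoc-generated Go name `PayloadValue` and the `proto/prysm/v1alpha1` package path used elsewhere in this diff:

```go
package main

import (
	"fmt"
	"math/big"

	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func main() {
	// 25 ETH expressed in Wei does not fit in a uint64, which is why the field is now a string.
	wei, _ := new(big.Int).SetString("25000000000000000000", 10)

	// Producer side: serialize the Wei amount as a decimal string.
	blk := &ethpb.GenericBeaconBlock{PayloadValue: wei.String()}

	// Consumer side: parse the decimal string back into a big.Int.
	parsed, ok := new(big.Int).SetString(blk.PayloadValue, 10)
	fmt.Println(parsed, ok) // 25000000000000000000 true
}
```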
Some files were not shown because too many files have changed in this diff.