Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fix-p...electra-up (25 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 125935b131 |  |
|  | 78cf75a0ed |  |
|  | fa370724f1 |  |
|  | 5edc64d88c |  |
|  | 7898e65d4e |  |
|  | 6d63dbe1af |  |
|  | eab9daf5f5 |  |
|  | 4becd7b375 |  |
|  | f230a6af58 |  |
|  | 539b981ac3 |  |
|  | aad29ff9fc |  |
|  | 4722446caf |  |
|  | b8aad84285 |  |
|  | 5f0d6074d6 |  |
|  | 9d6a2f5390 |  |
|  | 490ddbf782 |  |
|  | adc875b20d |  |
|  | 8cd249c1c8 |  |
|  | 305d5850e7 |  |
|  | df3a9f218d |  |
|  | ae451a3a02 |  |
|  | 17561a6576 |  |
|  | b842b7ea01 |  |
|  | 9bbe12e28c |  |
|  | 0674cf64cc |  |
@@ -224,6 +224,7 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
"@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
# fieldalignment disabled
#"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
@@ -332,14 +332,14 @@ http_archive(
filegroup(
name = "configs",
srcs = [
"custom_config_data/config.yaml",
"metadata/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
integrity = "sha256-b7ZTT+olF+VXEJYNTV5jggNtCkt9dOejm1i2VE+zy+0=",
strip_prefix = "holesky-874c199423ccd180607320c38cbaca05d9a1573a",
url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
)

http_archive(
@@ -31,7 +31,7 @@ func TestNormalizeQueryValuesHandler(t *testing.T) {

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
if err != nil {
t.Fatal(err)
}
@@ -64,6 +64,7 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -138,6 +139,7 @@ go_test(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -181,6 +183,7 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -13,6 +13,7 @@ import (
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -169,13 +170,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
// hook to process all post state finalization tasks
s.executePostFinalizationTasks(ctx, postState)
}

// If slasher is configured, forward the attestations in the block via an event feed for processing.
@@ -224,6 +220,19 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return nil
}

func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go func() {
finalizedState.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
s.sendNewFinalizedEvent(ctx, finalizedState)
}()
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}

// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
@@ -489,7 +498,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
s.cfg.SlasherAttestationsFeed.Send(&types.WrappedIndexedAtt{IndexedAtt: indexedAtt})
}
}
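The `executePostFinalizationTasks` hook above folds the previously inline work (finalized event, validator index save, bounded deposit insertion) into one place. A minimal, self-contained sketch of the concurrency pattern it relies on — fire-and-forget goroutines plus a `context.WithTimeout` bound for the deposit work. All names here are placeholders for illustration, not Prysm APIs:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Hypothetical deadline, standing in for the depositDeadline used in the diff above.
const depositDeadline = 20 * time.Second

// runPostFinalizationTasks shows the shape of the refactored hook: send the
// finalized event in one goroutine, and bound the deposit insertion with its
// own timeout context in another, cancelling once the work returns.
func runPostFinalizationTasks(ctx context.Context, sendFinalizedEvent func(context.Context), insertDeposits func(context.Context)) {
	go sendFinalizedEvent(ctx)

	depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
	go func() {
		insertDeposits(depCtx)
		cancel() // release the timeout context once insertion finishes
	}()
}

func main() {
	runPostFinalizationTasks(context.Background(),
		func(ctx context.Context) { fmt.Println("finalized event sent") },
		func(ctx context.Context) { fmt.Println("deposits inserted") },
	)
	time.Sleep(100 * time.Millisecond) // give the goroutines time to run in this toy example
}
```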
@@ -6,11 +6,14 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/common/hexutil"
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -20,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)

@@ -415,3 +419,81 @@ func Test_sendNewFinalizedEvent(t *testing.T) {
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

func Test_executePostFinalizationTasks(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EIP6110ValidatorIndexCache: true,
})
defer resetFn()

logHook := logTest.NewGlobal()

headState, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
require.NoError(t, err)

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*122 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.StateRoot = finalizedStRoot[:]
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)

hexKey := "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
key, err := hexutil.Decode(hexKey)
require.NoError(t, err)
require.NoError(t, headState.SetValidators([]*ethpb.Validator{
{
PublicKey: key,
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
},
}))
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: headRoot[:],
}))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))

s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
s.executePostFinalizationTasks(s.ctx, headState)

time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, headRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)

// check the cache
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
require.Equal(t, true, ok)
require.Equal(t, primitives.ValidatorIndex(0), index) // first index

// check deposit
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
}
@@ -330,6 +330,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "failed to initialize blockchain service")
}

saved.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110

return nil
}
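For context on the `SaveValidatorIndices` call above: with EIP-6110 style deposits the pubkey-to-index cache must be rebuilt from the saved state at startup so later lookups (such as `ValidatorIndexByPubkey` in the tests below) stay consistent with the saved registry. A toy sketch of that invariant, using an illustrative map-based cache rather than the real Prysm state API:

```go
package main

import "fmt"

// pubkeyIndexCache is a stand-in for the validator index cache rebuilt at
// startup (and on finalization) so pubkey lookups match the saved registry.
type pubkeyIndexCache struct {
	indices map[[48]byte]uint64
}

// saveValidatorIndices re-derives every pubkey -> index mapping from the
// current registry, which is the invariant SaveValidatorIndices maintains.
func (c *pubkeyIndexCache) saveValidatorIndices(pubkeys [][48]byte) {
	c.indices = make(map[[48]byte]uint64, len(pubkeys))
	for i, pk := range pubkeys {
		c.indices[pk] = uint64(i)
	}
}

func (c *pubkeyIndexCache) validatorIndexByPubkey(pk [48]byte) (uint64, bool) {
	idx, ok := c.indices[pk]
	return idx, ok
}

func main() {
	var k1, k2 [48]byte
	k1[0], k2[0] = 0x93, 0x42
	c := &pubkeyIndexCache{}
	c.saveValidatorIndices([][48]byte{k1, k2})
	idx, ok := c.validatorIndexByPubkey(k2)
	fmt.Println(idx, ok) // 1 true
}
```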
@@ -8,9 +8,10 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -29,6 +30,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -203,7 +205,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
BlockHash: make([]byte, 32),
})
require.NoError(t, err)
genState, err = blocks.ProcessPreGenesisDeposits(ctx, genState, deposits)
genState, err = altair.ProcessPreGenesisDeposits(ctx, genState, deposits)
require.NoError(t, err)

_, err = bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{DepositRoot: hashTreeRoot[:], BlockHash: make([]byte, 32)})
@@ -499,6 +501,66 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.Equal(t, true, op)
}

func TestStartFromSavedState_ValidatorIndexCacheUpdated(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableStartOptimistic: true,
EIP6110ValidatorIndexCache: true,
})
defer resetFn()

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState, err := util.NewBeaconState()
require.NoError(t, err)
hexKey := "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
key, err := hexutil.Decode(hexKey)
require.NoError(t, err)
hexKey2 := "0x42247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
key2, err := hexutil.Decode(hexKey2)
require.NoError(t, err)
require.NoError(t, headState.SetValidators([]*ethpb.Validator{
{
PublicKey: key,
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
},
{
PublicKey: key2,
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
},
}))
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)

c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
require.NoError(t, c.StartFromSavedState(headState))

index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
require.Equal(t, true, ok)
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
index2, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key2))
require.Equal(t, true, ok)
require.Equal(t, primitives.ValidatorIndex(1), index2) // second index
}

// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetGenesis.
type MockClockSetter struct {
@@ -36,6 +36,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -5,11 +5,32 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// ProcessPreGenesisDeposits processes a deposit for the beacon state before chainstart.
func ProcessPreGenesisDeposits(
ctx context.Context,
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
var err error
beaconState, err = ProcessDeposits(ctx, beaconState, deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
if err != nil {
return nil, err
}
return beaconState, nil
}

// ProcessDeposits processes validator deposits for beacon state Altair.
func ProcessDeposits(
ctx context.Context,
@@ -33,23 +54,158 @@ func ProcessDeposits(
return beaconState, nil
}

// ProcessDeposit processes validator deposit for beacon state Altair.
// ProcessDeposit takes in a deposit object and inserts it
// into the registry as a new validator or balance change.
// Returns the resulting state, a boolean to indicate whether or not the deposit
// resulted in a new validator entry into the beacon state, and any error.
//
// Spec pseudocode definition:
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
//
// # Verify the Merkle branch
// assert is_valid_merkle_branch(
// leaf=hash_tree_root(deposit.data),
// branch=deposit.proof,
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
// index=state.eth1_deposit_index,
// root=state.eth1_data.deposit_root,
// )
//
// # Deposits must be processed in order
// state.eth1_deposit_index += 1
//
// apply_deposit(
// state=state,
// pubkey=deposit.data.pubkey,
// withdrawal_credentials=deposit.data.withdrawal_credentials,
// amount=deposit.data.amount,
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
beaconState, isNewValidator, err := blocks.ProcessDeposit(beaconState, deposit, verifySignature)
if err != nil {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
return nil, errors.Wrapf(err, "could not verify deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
}
if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
return nil, err
}
if isNewValidator {
if err := beaconState.AppendInactivityScore(0); err != nil {

return ApplyDeposit(beaconState, deposit.Data, verifySignature)
}

// ApplyDeposit
// Spec pseudocode definition:
// def apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> None:
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// deposit_message = DepositMessage(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// amount=amount,
// )
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
// signing_root = compute_signing_root(deposit_message, domain)
// if bls.Verify(pubkey, signing_root, signature):
// add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
// else:
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
pubKey := data.PublicKey
amount := data.Amount
withdrawalCredentials := data.WithdrawalCredentials
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if verifySignature {
valid, err := blocks.IsValidDepositSignature(data)
if err != nil {
return nil, err
}
if !valid {
return beaconState, nil
}
}
if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, amount); err != nil {
return nil, err
}
if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
return nil, err
}
if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
} else {
if err := helpers.IncreaseBalance(beaconState, index, amount); err != nil {
return nil, err
}
}

return beaconState, nil
}

// AddValidatorToRegistry updates the beacon state with validator information
// def add_validator_to_registry(state: BeaconState,
//
// pubkey: BLSPubkey,
// withdrawal_credentials: Bytes32,
// amount: uint64) -> None:
// index = get_index_for_new_validator(state)
// validator = get_validator_from_deposit(pubkey, withdrawal_credentials)
// set_or_append_list(state.validators, index, validator)
// set_or_append_list(state.balances, index, 0)
// set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) // New in Altair
// set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) // New in Altair
// set_or_append_list(state.inactivity_scores, index, uint64(0)) // New in Altair
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
val := GetValidatorFromDeposit(pubKey, withdrawalCredentials, amount)
if err := beaconState.AppendValidator(val); err != nil {
return err
}
if err := beaconState.AppendBalance(amount); err != nil {
return err
}

// only active in altair and only when it's a new validator (after append balance)
if beaconState.Version() >= version.Altair {
if err := beaconState.AppendInactivityScore(0); err != nil {
return err
}
if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
return err
}
if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
return err
}
}
return nil
}

// GetValidatorFromDeposit gets a new validator object with provided parameters
//
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
//
// effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
//
// return Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=effective_balance,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}

return &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}
}
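The `get_validator_from_deposit` pseudocode quoted above reduces to one arithmetic step: round the deposit amount down to a multiple of `EFFECTIVE_BALANCE_INCREMENT`, then cap it at `MAX_EFFECTIVE_BALANCE`. A standalone sketch of that calculation, assuming the mainnet Gwei values for the two constants:

```go
package main

import "fmt"

// Mainnet constants assumed for illustration (values in Gwei).
const (
	effectiveBalanceIncrement = 1_000_000_000  // 1 ETH
	maxEffectiveBalance       = 32_000_000_000 // 32 ETH
)

// effectiveBalanceFromDeposit mirrors the quoted spec formula:
// min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE).
func effectiveBalanceFromDeposit(amount uint64) uint64 {
	eb := amount - amount%effectiveBalanceIncrement // round down to a full increment
	if eb > maxEffectiveBalance {
		eb = maxEffectiveBalance
	}
	return eb
}

func main() {
	fmt.Println(effectiveBalanceFromDeposit(31_500_000_000)) // 31000000000 (rounded down)
	fmt.Println(effectiveBalanceFromDeposit(40_000_000_000)) // 32000000000 (capped)
}
```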
@@ -30,6 +30,59 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
}
}

func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
ctx := context.Background()

for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
require.NoError(t, err)
r, err := altair.ProcessPreGenesisDeposits(ctx, s, []*ethpb.Deposit{deposit})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
ctx := context.Background()

for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := altair.ProcessPreGenesisDeposits(ctx, s, []*ethpb.Deposit{deposit})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}

for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := altair.ProcessDeposit(s, deposit, true)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
@@ -7,11 +7,14 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
@@ -44,36 +47,6 @@ func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
}

func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
}
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)

// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
beaconState, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{0},
BlockHash: []byte{1},
},
})
require.NoError(t, err)
want := "deposit root did not verify"
_, err = altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
require.ErrorContains(t, want, err)
}

func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
@@ -245,3 +218,158 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
}
}

func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {

dep, _, err := util.DeterministicDepositsAndKeys(100)
require.NoError(t, err)
dep[0].Data.Signature = make([]byte, 96)
dt, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)

for i := range dep {
proof, err := dt.MerkleProof(i)
require.NoError(t, err)
dep[i].Proof = proof
}
root, err := dt.HashTreeRoot()
require.NoError(t, err)

eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := altair.ProcessPreGenesisDeposits(context.Background(), beaconState, dep)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")

_, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey))
require.Equal(t, false, ok, "bad pubkey should not exist in state")

for i := 1; i < newState.NumValidators(); i++ {
val, err := newState.ValidatorAtIndex(primitives.ValidatorIndex(i))
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance, "unequal effective balance")
require.Equal(t, primitives.Epoch(0), val.ActivationEpoch)
require.Equal(t, primitives.Epoch(0), val.ActivationEligibilityEpoch)
}
if newState.Eth1DepositIndex() != 100 {
t.Errorf(
"Expected Eth1DepositIndex to be increased by 99 after processing an invalid deposit, received change: %v",
newState.Eth1DepositIndex(),
)
}
if len(newState.Validators()) != 100 {
t.Errorf("Expected validator list to have length 100, received: %v", len(newState.Validators()))
}
if len(newState.Balances()) != 100 {
t.Errorf("Expected validator balances list to have length 100, received: %v", len(newState.Balances()))
}
if newState.Balances()[0] != 0 {
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
}
}

func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
}
sr, err := signing.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 32))
require.NoError(t, err)
sig := sk.Sign(sr[:])
deposit.Data.Signature = sig.Marshal()
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)

// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
registry := []*ethpb.Validator{
{
PublicKey: []byte{1, 2, 3},
},
{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: []byte{1},
},
}
balances := []uint64{0, 50}
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)

beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: &ethpb.Eth1Data{
DepositRoot: root[:],
BlockHash: root[:],
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposit(beaconState, deposit, true /*verifySignature*/)
require.NoError(t, err, "Process deposit failed")
require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
}

func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
}
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)

// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{deposit},
},
}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{0},
BlockHash: []byte{1},
},
})
require.NoError(t, err)
want := "deposit root did not verify"
_, err = altair.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
assert.ErrorContains(t, want, err)
}
@@ -297,60 +297,6 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
}
}

func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposits := make([]*ethpb.Deposit, 100)
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
fuzzer.Fuzz(deposits[i])
}
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessDeposits(ctx, s, deposits)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
}
}
}

func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
ctx := context.Background()

for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessPreGenesisDeposits(ctx, s, []*ethpb.Deposit{deposit})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}

for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, _, err := ProcessDeposit(s, deposit, true)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
@@ -360,7 +306,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
err = verifyDeposit(s, deposit)
err = VerifyDeposit(s, deposit)
_ = err
}
}
@@ -5,7 +5,6 @@ import (
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -17,24 +16,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// ProcessPreGenesisDeposits processes a deposit for the beacon state before chainstart.
func ProcessPreGenesisDeposits(
ctx context.Context,
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
var err error
beaconState, err = ProcessDeposits(ctx, beaconState, deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
beaconState, err = ActivateValidatorWithEffectiveBalance(beaconState, deposits)
if err != nil {
return nil, err
}
return beaconState, nil
}

// ActivateValidatorWithEffectiveBalance updates validator's effective balance, and if it's above MaxEffectiveBalance, validator becomes active in genesis.
func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposits []*ethpb.Deposit) (state.BeaconState, error) {
for _, d := range deposits {
@@ -66,38 +47,6 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
return beaconState, nil
}

// ProcessDeposits is one of the operations performed on each processed
// beacon block to verify queued validators from the Ethereum 1.0 Deposit Contract
// into the beacon chain.
//
// Spec pseudocode definition:
//
// For each deposit in block.body.deposits:
// process_deposit(state, deposit)
func ProcessDeposits(
ctx context.Context,
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
// individual deposits with signature verification enabled.
batchVerified, err := BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, err
}

for _, d := range deposits {
if d == nil || d.Data == nil {
return nil, errors.New("got a nil deposit in block")
}
beaconState, _, err = ProcessDeposit(beaconState, d, batchVerified)
if err != nil {
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(d.Data.PublicKey))
}
}
return beaconState, nil
}

// BatchVerifyDepositsSignatures batch verifies deposit signatures.
func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposit) (bool, error) {
var err error
@@ -114,102 +63,28 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi
return verified, nil
}

// ProcessDeposit takes in a deposit object and inserts it
// into the registry as a new validator or balance change.
// Returns the resulting state, a boolean to indicate whether or not the deposit
// resulted in a new validator entry into the beacon state, and any error.
// IsValidDepositSignature returns whether deposit_data is valid
// def is_valid_deposit_signature(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> bool:
//
// Spec pseudocode definition:
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
//
// # Verify the Merkle branch
// assert is_valid_merkle_branch(
// leaf=hash_tree_root(deposit.data),
// branch=deposit.proof,
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
// index=state.eth1_deposit_index,
// root=state.eth1_data.deposit_root,
// )
//
// # Deposits must be processed in order
// state.eth1_deposit_index += 1
//
// pubkey = deposit.data.pubkey
// amount = deposit.data.amount
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// deposit_message = DepositMessage(
// pubkey=deposit.data.pubkey,
// withdrawal_credentials=deposit.data.withdrawal_credentials,
// amount=deposit.data.amount,
// )
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
// signing_root = compute_signing_root(deposit_message, domain)
// if not bls.Verify(pubkey, signing_root, deposit.data.signature):
// return
//
// # Add validator and balance entries
// state.validators.append(get_validator_from_deposit(state, deposit))
// state.balances.append(amount)
// else:
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, bool, error) {
var newValidator bool
if err := verifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, newValidator, err
}
return nil, newValidator, errors.Wrapf(err, "could not verify deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
// deposit_message = DepositMessage( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, )
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
// signing_root = compute_signing_root(deposit_message, domain)
// return bls.Verify(pubkey, signing_root, signature)
func IsValidDepositSignature(data *ethpb.Deposit_Data) (bool, error) {
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
if err != nil {
return false, err
}
if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
return nil, newValidator, err
if err := verifyDepositDataSigningRoot(data, domain); err != nil {
// Ignore this error as in the spec pseudo code.
log.WithError(err).Debug("Skipping deposit: could not verify deposit data signature")
return false, nil
}
pubKey := deposit.Data.PublicKey
amount := deposit.Data.Amount
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if verifySignature {
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
if err != nil {
return nil, newValidator, err
}
if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
// Ignore this error as in the spec pseudo code.
log.WithError(err).Debug("Skipping deposit: could not verify deposit data signature")
return beaconState, newValidator, nil
}
}

effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}
if err := beaconState.AppendValidator(&ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: deposit.Data.WithdrawalCredentials,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}); err != nil {
return nil, newValidator, err
}
newValidator = true
if err := beaconState.AppendBalance(amount); err != nil {
return nil, newValidator, err
}
} else if err := helpers.IncreaseBalance(beaconState, index, amount); err != nil {
return nil, newValidator, err
}

return beaconState, newValidator, nil
return true, nil
}

func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit) error {
// VerifyDeposit verifies the deposit data and signature given the beacon state and deposit information
func VerifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit) error {
// Verify Merkle proof of deposit and deposit trie root.
if deposit == nil || deposit.Data == nil {
return errors.New("received nil deposit or nil deposit data")
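The `is_valid_deposit_signature` pseudocode quoted above verifies the proof of possession against the fork-agnostic `DOMAIN_DEPOSIT`. A minimal sketch of that flow, mirroring `TestIsValidDepositSignature_Ok` below and using only calls that appear in this diff; it is an illustration only and does not cover the Merkle-branch check performed by `VerifyDeposit`:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	sk, err := bls.RandKey()
	if err != nil {
		panic(err)
	}
	data := &ethpb.Deposit_Data{
		PublicKey:             sk.PublicKey().Marshal(),
		WithdrawalCredentials: make([]byte, 32),
		Amount:                0,
		Signature:             make([]byte, 96),
	}
	// Fork-agnostic deposit domain, as in the quoted spec pseudocode.
	domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
	if err != nil {
		panic(err)
	}
	dm := &ethpb.DepositMessage{
		PublicKey:             data.PublicKey,
		WithdrawalCredentials: data.WithdrawalCredentials,
		Amount:                data.Amount,
	}
	sr, err := signing.ComputeSigningRoot(dm, domain)
	if err != nil {
		panic(err)
	}
	data.Signature = sk.Sign(sr[:]).Marshal()

	valid, err := blocks.IsValidDepositSignature(data)
	fmt.Println(valid, err) // expect: true <nil>
}
```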
@@ -9,59 +9,42 @@ import (
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
// Same validator created 3 valid deposits within the same block

dep, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
// 3 deposits from the same validator
Deposits: []*ethpb.Deposit{dep[0], dep[1], dep[2]},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
require.NoError(t, err, "Expected block deposits to process correctly")

assert.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
}

func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, fieldparams.BLSPubkeyLength),
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, fieldparams.BLSSignatureLength),
Signature: make([]byte, 96),
},
}
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
require.NoError(t, err)
ok, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, true, ok)
}

func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
}
leaf, err := deposit.Data.HashTreeRoot()
@@ -74,13 +57,7 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{deposit},
},
}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
beaconState, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{0},
BlockHash: []byte{1},
@@ -88,312 +65,31 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
})
require.NoError(t, err)
want := "deposit root did not verify"
_, err = blocks.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
assert.ErrorContains(t, want, err)
err = blocks.VerifyDeposit(beaconState, deposit)
require.ErrorContains(t, want, err)
}

func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)

b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{dep[0]},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
require.NoError(t, err, "Expected block deposits to process correctly")
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 0 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}

func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
func TestIsValidDepositSignature_Ok(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
depositData := &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 0,
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
sr, err := signing.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 32))
dm := &ethpb.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
}
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(dm, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
deposit.Data.Signature = sig.Marshal()
leaf, err := deposit.Data.HashTreeRoot()
depositData.Signature = sig.Marshal()
valid, err := blocks.IsValidDepositSignature(depositData)
require.NoError(t, err)

// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{deposit},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1, 2, 3},
},
{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: []byte{1},
},
}
balances := []uint64{0, 50}
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: &ethpb.Eth1Data{
DepositRoot: root[:],
BlockHash: root[:],
},
})
require.NoError(t, err)
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
require.NoError(t, err, "Process deposit failed")
assert.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
}

func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
// Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)

registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, isNewValidator, err := blocks.ProcessDeposit(beaconState, dep[0], true)
require.NoError(t, err, "Process deposit failed")
assert.Equal(t, true, isNewValidator, "Expected isNewValidator to be true")
assert.Equal(t, 2, len(newState.Validators()), "Expected validator list to have length 2")
assert.Equal(t, 2, len(newState.Balances()), "Expected validator balances list to have length 2")
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 1 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}

func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
// Same test settings as in TestProcessDeposit_AddsNewValidatorDeposit, except that we use an invalid signature
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
dep[0].Data.Signature = make([]byte, 96)
dt, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root, err := dt.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, isNewValidator, err := blocks.ProcessDeposit(beaconState, dep[0], true)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
assert.Equal(t, false, isNewValidator, "Expected isNewValidator to be false")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
t.Errorf(
|
||||
"Expected Eth1DepositIndex to be increased by 1 after processing an invalid deposit, received change: %v",
|
||||
newState.Eth1DepositIndex(),
|
||||
)
|
||||
}
|
||||
if len(newState.Validators()) != 1 {
|
||||
t.Errorf("Expected validator list to have length 1, received: %v", len(newState.Validators()))
|
||||
}
|
||||
if len(newState.Balances()) != 1 {
|
||||
t.Errorf("Expected validator balances list to have length 1, received: %v", len(newState.Balances()))
|
||||
}
|
||||
if newState.Balances()[0] != 0 {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
|
||||
|
||||
dep, _, err := util.DeterministicDepositsAndKeys(100)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
dt, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := range dep {
|
||||
proof, err := dt.MerkleProof(i)
|
||||
require.NoError(t, err)
|
||||
dep[i].Proof = proof
|
||||
}
|
||||
root, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
}
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := blocks.ProcessPreGenesisDeposits(context.Background(), beaconState, dep)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
_, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey))
|
||||
require.Equal(t, false, ok, "bad pubkey should not exist in state")
|
||||
|
||||
for i := 1; i < newState.NumValidators(); i++ {
|
||||
val, err := newState.ValidatorAtIndex(primitives.ValidatorIndex(i))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance, "unequal effective balance")
|
||||
require.Equal(t, primitives.Epoch(0), val.ActivationEpoch)
|
||||
require.Equal(t, primitives.Epoch(0), val.ActivationEligibilityEpoch)
|
||||
}
|
||||
if newState.Eth1DepositIndex() != 100 {
|
||||
t.Errorf(
|
||||
"Expected Eth1DepositIndex to be increased by 99 after processing an invalid deposit, received change: %v",
|
||||
newState.Eth1DepositIndex(),
|
||||
)
|
||||
}
|
||||
if len(newState.Validators()) != 100 {
|
||||
t.Errorf("Expected validator list to have length 100, received: %v", len(newState.Validators()))
|
||||
}
|
||||
if len(newState.Balances()) != 100 {
|
||||
t.Errorf("Expected validator balances list to have length 100, received: %v", len(newState.Balances()))
|
||||
}
|
||||
if newState.Balances()[0] != 0 {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: 1000,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
sr, err := signing.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 32))
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
deposit.Data.Signature = sig.Marshal()
|
||||
leaf, err := deposit.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
|
||||
deposit.Proof = proof
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1, 2, 3},
|
||||
},
|
||||
{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: []byte{1},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0, 50}
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
BlockHash: root[:],
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, isNewValidator, err := blocks.ProcessDeposit(beaconState, deposit, true /*verifySignature*/)
|
||||
require.NoError(t, err, "Process deposit failed")
|
||||
assert.Equal(t, false, isNewValidator, "Expected isNewValidator to be false")
|
||||
assert.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
|
||||
require.Equal(t, true, valid)
|
||||
}
|
||||
|
||||
@@ -21,14 +21,12 @@ go_library(
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
@@ -55,19 +53,14 @@ go_test(
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -5,12 +5,7 @@ import (

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"go.opencensus.io/trace"
)
@@ -40,7 +35,7 @@ import (
//
//	state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
	ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
	_, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
	defer span.End()

	if st == nil || st.IsNil() {
@@ -68,7 +63,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
			break
		}

		if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
		if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
			return err
		}
@@ -91,168 +86,3 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err

	return nil
}
|
||||
|
||||
// ProcessConsolidations implements the spec definition below. This method makes mutating calls to
|
||||
// the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
|
||||
// # If the pending consolidations queue is full, no consolidations are allowed in the block
|
||||
// assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
|
||||
// # If there is too little available consolidation churn limit, no consolidations are allowed in the block
|
||||
// assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
|
||||
// consolidation = signed_consolidation.message
|
||||
// # Verify that source != target, so a consolidation cannot be used as an exit.
|
||||
// assert consolidation.source_index != consolidation.target_index
|
||||
//
|
||||
// source_validator = state.validators[consolidation.source_index]
|
||||
// target_validator = state.validators[consolidation.target_index]
|
||||
// # Verify the source and the target are active
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// assert is_active_validator(source_validator, current_epoch)
|
||||
// assert is_active_validator(target_validator, current_epoch)
|
||||
// # Verify exits for source and target have not been initiated
|
||||
// assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// # Consolidations must specify an epoch when they become valid; they are not valid before then
|
||||
// assert current_epoch >= consolidation.epoch
|
||||
//
|
||||
// # Verify the source and the target have Execution layer withdrawal credentials
|
||||
// assert has_execution_withdrawal_credential(source_validator)
|
||||
// assert has_execution_withdrawal_credential(target_validator)
|
||||
// # Verify the same withdrawal address
|
||||
// assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
|
||||
//
|
||||
// # Verify consolidation is signed by the source and the target
|
||||
// domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
|
||||
// signing_root = compute_signing_root(consolidation, domain)
|
||||
// pubkeys = [source_validator.pubkey, target_validator.pubkey]
|
||||
// assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
|
||||
//
|
||||
// # Initiate source validator exit and append pending consolidation
|
||||
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||
// state, source_validator.effective_balance)
|
||||
// source_validator.withdrawable_epoch = Epoch(
|
||||
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
// )
|
||||
// state.pending_consolidations.append(PendingConsolidation(
|
||||
// source_index=consolidation.source_index,
|
||||
// target_index=consolidation.target_index
|
||||
// ))
|
||||
func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethpb.SignedConsolidation) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessConsolidations")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil // Nothing to process.
|
||||
}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil, // Use genesis fork version
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helpers.ConsolidationChurnLimit(primitives.Gwei(totalBalance)) <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
return errors.New("too little available consolidation churn limit")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
for _, c := range cs {
|
||||
if c == nil || c.Message == nil {
|
||||
return errors.New("nil consolidation")
|
||||
}
|
||||
|
||||
if n, err := st.NumPendingConsolidations(); err != nil {
|
||||
return err
|
||||
} else if n >= params.BeaconConfig().PendingConsolidationsLimit {
|
||||
return errors.New("pending consolidations queue is full")
|
||||
}
|
||||
|
||||
if c.Message.SourceIndex == c.Message.TargetIndex {
|
||||
return errors.New("source and target index are the same")
|
||||
}
|
||||
source, err := st.ValidatorAtIndex(c.Message.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target, err := st.ValidatorAtIndex(c.Message.TargetIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(source, currentEpoch) {
|
||||
return errors.New("source is not active")
|
||||
}
|
||||
if !helpers.IsActiveValidator(target, currentEpoch) {
|
||||
return errors.New("target is not active")
|
||||
}
|
||||
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("source exit epoch has been initiated")
|
||||
}
|
||||
if target.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("target exit epoch has been initiated")
|
||||
}
|
||||
if currentEpoch < c.Message.Epoch {
|
||||
return errors.New("consolidation is not valid yet")
|
||||
}
|
||||
|
||||
if !helpers.HasExecutionWithdrawalCredentials(source) {
|
||||
return errors.New("source does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.HasExecutionWithdrawalCredentials(target) {
|
||||
return errors.New("target does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.IsSameWithdrawalCredentials(source, target) {
|
||||
return errors.New("source and target have different withdrawal credentials")
|
||||
}
|
||||
|
||||
sr, err := signing.ComputeSigningRoot(c.Message, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sourcePk, err := bls.PublicKeyFromBytes(source.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert source public key bytes to bls public key")
|
||||
}
|
||||
targetPk, err := bls.PublicKeyFromBytes(target.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert target public key bytes to bls public key")
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(c.Signature)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
if !sig.FastAggregateVerify([]bls.PublicKey{sourcePk, targetPk}, sr) {
|
||||
return errors.New("consolidation signature verification failed")
|
||||
}
|
||||
|
||||
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(source.EffectiveBalance))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
source.ExitEpoch = sEE
|
||||
source.WithdrawableEpoch = sEE + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
if err := st.UpdateValidatorAtIndex(c.Message.SourceIndex, source); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := st.AppendPendingConsolidation(c.Message.ToPendingConsolidation()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
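The exit epoch assigned to the consolidated source validator above comes from ComputeConsolidationEpochAndUpdateChurn. As a rough sketch of the arithmetic behind the value the removed test below expects (exit epoch 15 for a state at epoch 10), assuming an empty consolidation queue so the earliest consolidation epoch is simply the activation-exit epoch, i.e. current epoch + 1 + a seed lookahead of 4 (the mainnet value):

// Sketch only; the real computation, including churn accounting, lives in
// ComputeConsolidationEpochAndUpdateChurn.
currentEpoch := primitives.Epoch(10)
seedLookahead := primitives.Epoch(4) // assumed mainnet lookahead
exitEpoch := currentEpoch + 1 + seedLookahead // = 15
withdrawableEpoch := exitEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
_, _ = exitEpoch, withdrawableEpoch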
|
||||
|
||||
@@ -5,19 +5,11 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/interop"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingConsolidations(t *testing.T) {
|
||||
@@ -237,205 +229,3 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func TestProcessConsolidations(t *testing.T) {
|
||||
secretKeys, publicKeys, err := interop.DeterministicallyGenerateKeys(0, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisValidatorRoot := bytesutil.PadTo([]byte("genesisValidatorRoot"), fieldparams.RootLength)
|
||||
|
||||
_ = secretKeys
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
scs []*eth.SignedConsolidation
|
||||
check func(*testing.T, state.BeaconState)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
scs: make([]*eth.SignedConsolidation, 10),
|
||||
wantErr: "nil state",
|
||||
},
|
||||
{
|
||||
name: "nil consolidation in slice",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{nil, nil},
|
||||
wantErr: "nil consolidation",
|
||||
},
|
||||
{
|
||||
name: "state is 100% full of pending consolidations",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
pc := make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit)
|
||||
require.NoError(t, st.SetPendingConsolidations(pc))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "pending consolidations queue is full",
|
||||
},
|
||||
{
|
||||
name: "state has too little consolidation churn limit available to process a consolidation",
|
||||
state: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "too little available consolidation churn limit",
|
||||
},
|
||||
{
|
||||
name: "consolidation with source and target as the same index is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 100}}},
|
||||
wantErr: "source and target index are the same",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with future epoch is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 55}}},
|
||||
wantErr: "consolidation is not valid yet",
|
||||
},
|
||||
{
|
||||
name: "source validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "target validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "source and target with different withdrawal credentials is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "source and target have different withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "consolidation with valid signatures is OK",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
require.NoError(t, st.SetGenesisValidatorsRoot(genesisValidatorRoot))
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
target, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
source.PublicKey = publicKeys[0].Marshal()
|
||||
source.WithdrawalCredentials = target.WithdrawalCredentials
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(100, source))
|
||||
target.PublicKey = publicKeys[1].Marshal()
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, target))
|
||||
return st
|
||||
}(),
|
||||
scs: func() []*eth.SignedConsolidation {
|
||||
sc := ð.SignedConsolidation{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 8}}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil,
|
||||
genesisValidatorRoot,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(sc.Message, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
sig0 := secretKeys[0].Sign(sr[:])
|
||||
sig1 := secretKeys[1].Sign(sr[:])
|
||||
|
||||
sc.Signature = blst.AggregateSignatures([]common.Signature{sig0, sig1}).Marshal()
|
||||
|
||||
return []*eth.SignedConsolidation{sc}
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
// The consolidated validator is exiting.
|
||||
require.Equal(t, primitives.Epoch(15), source.ExitEpoch) // 15 = state.Epoch(10) + MIN_SEED_LOOKAHEAD(4) + 1
|
||||
require.Equal(t, primitives.Epoch(15+params.BeaconConfig().MinValidatorWithdrawabilityDelay), source.WithdrawableEpoch)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidations(context.TODO(), tt.state, tt.scs)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -244,25 +244,26 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
ConsolidationRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7251]
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
@@ -295,14 +296,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
for _, index := range preActivationIndices {
|
||||
if err := helpers.QueueEntireBalanceAndResetValidator(post, index); err != nil {
|
||||
if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure early adopters of compounding credentials go through the activation churn
|
||||
for _, index := range compoundWithdrawalIndices {
|
||||
if err := helpers.QueueExcessActiveBalance(post, index); err != nil {
|
||||
if err := QueueExcessActiveBalance(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue excess active balance")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,23 +113,24 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
ConsolidationRequestsRoot: bytesutil.Bytes32(0),
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
@@ -19,7 +18,7 @@ import (
|
||||
// if has_eth1_withdrawal_credential(validator):
|
||||
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
|
||||
// queue_excess_active_balance(state, index)
|
||||
func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -32,12 +31,12 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return queueExcessActiveBalance(ctx, s, idx)
|
||||
return QueueExcessActiveBalance(s, idx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queueExcessActiveBalance
|
||||
// QueueExcessActiveBalance
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
@@ -49,7 +48,7 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=excess_balance)
|
||||
// )
|
||||
func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -80,7 +79,7 @@ func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx prim
|
||||
// )
|
||||
//
|
||||
//nolint:dupword
|
||||
func QueueEntireBalanceAndResetValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -2,7 +2,6 @@ package electra_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
@@ -11,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
@@ -34,10 +34,10 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
})
|
||||
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(context.TODO(), s, 0))
|
||||
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
|
||||
|
||||
// Test that a validator with withdrawal credentials can be switched to compounding.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 1))
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
v, err := s.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
|
||||
@@ -50,7 +50,7 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
|
||||
|
||||
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 2))
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
|
||||
b, err = s.BalanceAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
|
||||
@@ -74,7 +74,7 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(context.TODO(), s, 0))
|
||||
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(s, 0))
|
||||
b, err := s.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), b, "balance was not changed")
|
||||
@@ -88,3 +88,57 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
	require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
	require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}

func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	vals := st.Validators()
	vals[0].WithdrawalCredentials = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte}
	require.NoError(t, st.SetValidators(vals))
	bals := st.Balances()
	bals[0] = params.BeaconConfig().MinActivationBalance + 1010
	require.NoError(t, st.SetBalances(bals))
	require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))

	pbd, err := st.PendingBalanceDeposits()
	require.NoError(t, err)
	require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
	val, err := st.ValidatorAtIndex(0)
	require.NoError(t, err)

	require.Equal(t, true, bytes.HasPrefix(val.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
}

func TestQueueExcessActiveBalance_Ok(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	bals := st.Balances()
	bals[0] = params.BeaconConfig().MinActivationBalance + 1000
	require.NoError(t, st.SetBalances(bals))

	err := electra.QueueExcessActiveBalance(st, 0)
	require.NoError(t, err)

	pbd, err := st.PendingBalanceDeposits()
	require.NoError(t, err)
	require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end

	bals = st.Balances()
	require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}

func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	// Balances must be set manually here: after EIP-6110 the deterministic genesis helper
	// leaves them at 0 and populates pending balance deposits instead.
	bals := st.Balances()
	bals[0] = params.BeaconConfig().MinActivationBalance - 1000
	require.NoError(t, st.SetBalances(bals))
	err := electra.QueueEntireBalanceAndResetValidator(st, 0)
	require.NoError(t, err)

	pbd, err := st.PendingBalanceDeposits()
	require.NoError(t, err)
	require.Equal(t, 1, len(pbd))
	require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)
	bal, err := st.BalanceAtIndex(0)
	require.NoError(t, err)
	require.Equal(t, uint64(0), bal)
}

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["epoch_processing.go"],
|
||||
srcs = [
|
||||
"epoch_processing.go",
|
||||
"sortable_indices.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
@@ -31,6 +34,7 @@ go_test(
|
||||
srcs = [
|
||||
"epoch_processing_fuzz_test.go",
|
||||
"epoch_processing_test.go",
|
||||
"sortable_indices_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -47,6 +51,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_google_go_cmp//cmp:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -24,27 +24,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
// sortableIndices implements the Sort interface to sort newly activated validator indices
|
||||
// by activation epoch and by index number.
|
||||
type sortableIndices struct {
|
||||
indices []primitives.ValidatorIndex
|
||||
validators []*ethpb.Validator
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s sortableIndices) Len() int { return len(s.indices) }
|
||||
|
||||
// Swap swaps the elements with indexes i and j.
|
||||
func (s sortableIndices) Swap(i, j int) { s.indices[i], s.indices[j] = s.indices[j], s.indices[i] }
|
||||
|
||||
// Less reports whether the element with index i must sort before the element with index j.
|
||||
func (s sortableIndices) Less(i, j int) bool {
|
||||
if s.validators[s.indices[i]].ActivationEligibilityEpoch == s.validators[s.indices[j]].ActivationEligibilityEpoch {
|
||||
return s.indices[i] < s.indices[j]
|
||||
}
|
||||
return s.validators[s.indices[i]].ActivationEligibilityEpoch < s.validators[s.indices[j]].ActivationEligibilityEpoch
|
||||
}
|
||||
|
||||
// AttestingBalance returns the total balance from all the attesting indices.
|
||||
//
|
||||
// WARNING: This method allocates a new copy of the attesting validator indices set and is
|
||||
@@ -91,55 +70,78 @@ func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts
|
||||
// for index in activation_queue[:get_validator_churn_limit(state)]:
|
||||
// validator = state.validators[index]
|
||||
// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
vals := state.Validators()
|
||||
func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.BeaconState, error) {
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
var err error
|
||||
ejectionBal := params.BeaconConfig().EjectionBalance
|
||||
activationEligibilityEpoch := time.CurrentEpoch(state) + 1
|
||||
for idx, validator := range vals {
|
||||
// Process the validators for activation eligibility.
|
||||
if helpers.IsEligibleForActivationQueue(validator, currentEpoch) {
|
||||
validator.ActivationEligibilityEpoch = activationEligibilityEpoch
|
||||
if err := state.UpdateValidatorAtIndex(primitives.ValidatorIndex(idx), validator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To avoid copying the state validator set via st.Validators(), we will perform a read only pass
|
||||
// over the validator set while collecting validator indices where the validator copy is actually
|
||||
// necessary, then we will process these operations.
|
||||
eligibleForActivationQ := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
|
||||
|
||||
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
// Collect validators eligible to enter the activation queue.
|
||||
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
|
||||
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Process the validators for ejection.
|
||||
isActive := helpers.IsActiveValidator(validator, currentEpoch)
|
||||
belowEjectionBalance := validator.EffectiveBalance <= ejectionBal
|
||||
// Collect validators to eject.
|
||||
isActive := helpers.IsActiveValidatorUsingTrie(val, currentEpoch)
|
||||
belowEjectionBalance := val.EffectiveBalance() <= ejectionBal
|
||||
if isActive && belowEjectionBalance {
|
||||
// Here is fine to do a quadratic loop since this should
|
||||
// barely happen
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
|
||||
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
}
|
||||
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators eligible for activation and not yet dequeued for activation.
|
||||
if helpers.IsEligibleForActivationUsingTrie(st, val) {
|
||||
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return st, fmt.Errorf("failed to read validators: %w", err)
|
||||
}
|
||||
|
||||
// Process validators for activation eligibility.
|
||||
activationEligibilityEpoch := time.CurrentEpoch(st) + 1
|
||||
for _, idx := range eligibleForActivationQ {
|
||||
v, err := st.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.ActivationEligibilityEpoch = activationEligibilityEpoch
|
||||
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Process validators eligible for ejection.
|
||||
for _, idx := range eligibleForEjection {
|
||||
// Here is fine to do a quadratic loop since this should
|
||||
// barely happen
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
|
||||
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
// Queue validators eligible for activation and not yet dequeued for activation.
|
||||
var activationQ []primitives.ValidatorIndex
|
||||
for idx, validator := range vals {
|
||||
if helpers.IsEligibleForActivation(state, validator) {
|
||||
activationQ = append(activationQ, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(sortableIndices{indices: activationQ, validators: vals})
|
||||
sort.Sort(sortableIndices{indices: eligibleForActivation, state: st})
|
||||
|
||||
// Only activate just enough validators according to the activation churn limit.
|
||||
limit := uint64(len(activationQ))
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, state, currentEpoch)
|
||||
limit := uint64(len(eligibleForActivation))
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, st, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit := helpers.ValidatorActivationChurnLimit(activeValidatorCount)
|
||||
|
||||
if state.Version() >= version.Deneb {
|
||||
if st.Version() >= version.Deneb {
|
||||
churnLimit = helpers.ValidatorActivationChurnLimitDeneb(activeValidatorCount)
|
||||
}
|
||||
|
||||
@@ -149,17 +151,17 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
}
|
||||
|
||||
activationExitEpoch := helpers.ActivationExitEpoch(currentEpoch)
|
||||
for _, index := range activationQ[:limit] {
|
||||
validator, err := state.ValidatorAtIndex(index)
|
||||
for _, index := range eligibleForActivation[:limit] {
|
||||
validator, err := st.ValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validator.ActivationEpoch = activationExitEpoch
|
||||
if err := state.UpdateValidatorAtIndex(index, validator); err != nil {
|
||||
if err := st.UpdateValidatorAtIndex(index, validator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ProcessSlashings processes the slashed validators during epoch processing,
|
||||
|
||||
@@ -307,14 +307,15 @@ func TestProcessRegistryUpdates_NoRotation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
finalizedEpoch := primitives.Epoch(4)
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
limit := helpers.ValidatorActivationChurnLimit(0)
|
||||
for i := uint64(0); i < limit+10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
})
|
||||
@@ -325,7 +326,6 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
for i, validator := range newState.Validators() {
|
||||
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
|
||||
if uint64(i) < limit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
|
||||
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
|
||||
i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
|
||||
@@ -338,9 +338,10 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
finalizedEpoch := primitives.Epoch(4)
|
||||
base := ðpb.BeaconStateDeneb{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.MinPerEpochChurnLimit = 10
|
||||
@@ -349,7 +350,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
})
|
||||
@@ -360,7 +361,6 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
for i, validator := range newState.Validators() {
|
||||
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
|
||||
// Note: In Deneb, only validators indices before `MaxPerEpochActivationChurnLimit` should be activated.
|
||||
if uint64(i) < params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
|
||||
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
|
||||
|
||||
35
beacon-chain/core/epoch/sortable_indices.go
Normal file
35
beacon-chain/core/epoch/sortable_indices.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// sortableIndices implements the Sort interface to sort newly activated validator indices
|
||||
// by activation epoch and by index number.
|
||||
type sortableIndices struct {
|
||||
indices []primitives.ValidatorIndex
|
||||
state state.ReadOnlyValidators
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s sortableIndices) Len() int { return len(s.indices) }
|
||||
|
||||
// Swap swaps the elements with indexes i and j.
|
||||
func (s sortableIndices) Swap(i, j int) { s.indices[i], s.indices[j] = s.indices[j], s.indices[i] }
|
||||
|
||||
// Less reports whether the element with index i must sort before the element with index j.
|
||||
func (s sortableIndices) Less(i, j int) bool {
|
||||
vi, erri := s.state.ValidatorAtIndexReadOnly(s.indices[i])
|
||||
vj, errj := s.state.ValidatorAtIndexReadOnly(s.indices[j])
|
||||
|
||||
if erri != nil || errj != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
a, b := vi.ActivationEligibilityEpoch(), vj.ActivationEligibilityEpoch()
|
||||
if a == b {
|
||||
return s.indices[i] < s.indices[j]
|
||||
}
|
||||
return a < b
|
||||
}
|
||||
53
beacon-chain/core/epoch/sortable_indices_test.go
Normal file
53
beacon-chain/core/epoch/sortable_indices_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestSortableIndices(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoPhase0(ð.BeaconState{
|
||||
Validators: []*eth.Validator{
|
||||
{ActivationEligibilityEpoch: 0},
|
||||
{ActivationEligibilityEpoch: 5},
|
||||
{ActivationEligibilityEpoch: 4},
|
||||
{ActivationEligibilityEpoch: 4},
|
||||
{ActivationEligibilityEpoch: 2},
|
||||
{ActivationEligibilityEpoch: 1},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
s := sortableIndices{
|
||||
indices: []primitives.ValidatorIndex{
|
||||
4,
|
||||
2,
|
||||
5,
|
||||
3,
|
||||
1,
|
||||
0,
|
||||
},
|
||||
state: st,
|
||||
}
|
||||
|
||||
sort.Sort(s)
|
||||
|
||||
want := []primitives.ValidatorIndex{
|
||||
0,
|
||||
5,
|
||||
4,
|
||||
2, // Validators with the same ActivationEligibilityEpoch are sorted by index, ascending.
|
||||
3,
|
||||
1,
|
||||
}
|
||||
|
||||
if !cmp.Equal(s.indices, want) {
|
||||
t.Errorf("Failed to sort indices correctly, wanted %v, got %v", want, s.indices)
|
||||
}
|
||||
}
|
||||
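
The new sortableIndices type is a plain sort.Interface implementation. A minimal usage sketch, assuming a state st that satisfies state.ReadOnlyValidators and a hypothetical slice of activation-queue candidates (mirroring the test above):

	// Order candidates by activation eligibility epoch, breaking ties by validator index.
	candidates := []primitives.ValidatorIndex{12, 7, 3}
	sort.Sort(sortableIndices{indices: candidates, state: st})
	// candidates is now deterministically ordered and can be activated up to the churn limit.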
@@ -79,6 +79,7 @@ go_test(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -91,6 +92,14 @@ func IsAggregated(attestation ethpb.Att) bool {
//
//	return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
	if att.Version() >= version.Electra {
		committeeIndex := 0
		committeeIndices := att.CommitteeBitsVal().BitIndices()
		if len(committeeIndices) > 0 {
			committeeIndex = committeeIndices[0]
		}
		return ComputeSubnetFromCommitteeAndSlot(activeValCount, primitives.CommitteeIndex(committeeIndex), att.GetData().Slot)
	}
	return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
}
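
Worth noting in the Electra branch above: the committee index is read from the first set bit of the committee bits and falls back to 0 when no bit is set. A minimal sketch of the Electra path, assuming valCount holds the active validator count for the attestation's epoch:

	cb := primitives.NewAttestationCommitteeBits()
	cb.SetBitAt(4, true)
	att := &ethpb.AttestationElectra{CommitteeBits: cb, Data: &ethpb.AttestationData{Slot: 34}}
	// Resolves to the same subnet as a phase0 attestation with Data.CommitteeIndex == 4.
	subnet := ComputeSubnetForAttestation(valCount, att)
	_ = subnet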
|
||||
|
||||
|
||||
@@ -73,21 +73,37 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
Source: nil,
|
||||
Target: nil,
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(att.Data.Slot))
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34))
|
||||
require.NoError(t, err)
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(4, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
AggregationBits: []byte{'A'},
|
||||
CommitteeBits: cb,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
}
|
||||
|
||||
func Test_ValidateAttestationTime(t *testing.T) {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -357,10 +358,11 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
//	candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
//	random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
//	effective_balance = state.validators[candidate_index].effective_balance
//	if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
//	# [Modified in Electra:EIP7251]
//	if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
//	    return candidate_index
//	i += 1
func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
	length := uint64(len(activeIndices))
	if length == 0 {
		return 0, errors.New("empty active indices list")
@@ -385,7 +387,12 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndi
		}
		effectiveBal := v.EffectiveBalance()

		if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
		maxEB := params.BeaconConfig().MaxEffectiveBalanceElectra
		if bState.Version() < version.Electra {
			maxEB = params.BeaconConfig().MaxEffectiveBalance
		}

		if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
			return candidateIndex, nil
		}
	}
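
The practical effect of switching the threshold to MaxEffectiveBalanceElectra is that proposer selection becomes proportional to effective balance up to 2048 ETH instead of saturating at 32 ETH. A rough sketch of the acceptance check, assuming mainnet config values and maxRandomByte == 255:

	// accept mirrors the sampling condition above for a single candidate and random byte.
	accept := func(effectiveBalGwei, maxEBGwei uint64, randomByte byte) bool {
		return effectiveBalGwei*255 >= maxEBGwei*uint64(randomByte)
	}
	// Pre-Electra a 32 ETH validator passes for every random byte (32e9*255 >= 32e9*rb).
	// Under Electra the same validator needs 32e9*255 >= 2048e9*rb, i.e. rb <= 3,
	// roughly a 4/256 acceptance rate per sample, while a 2048 ETH validator always passes.
	_ = accept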
|
||||
@@ -404,11 +411,11 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primi
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
|
||||
// and validator.effective_balance >= MIN_ACTIVATION_BALANCE # [Modified in Electra:EIP7251]
|
||||
// )
|
||||
func IsEligibleForActivationQueue(validator *ethpb.Validator, currentEpoch primitives.Epoch) bool {
|
||||
func IsEligibleForActivationQueue(validator state.ReadOnlyValidator, currentEpoch primitives.Epoch) bool {
|
||||
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
|
||||
}
|
||||
return isEligibleForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
return isEligibleForActivationQueue(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
|
||||
}
|
||||
|
||||
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue
|
||||
@@ -674,68 +681,3 @@ func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
	}
	return params.BeaconConfig().MinActivationBalance
}

// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
//
// Spec definition:
//
//	def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
//	    balance = state.balances[index]
//	    if balance > MIN_ACTIVATION_BALANCE:
//	        excess_balance = balance - MIN_ACTIVATION_BALANCE
//	        state.balances[index] = MIN_ACTIVATION_BALANCE
//	        state.pending_balance_deposits.append(
//	            PendingBalanceDeposit(index=index, amount=excess_balance)
//	        )
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
	bal, err := s.BalanceAtIndex(idx)
	if err != nil {
		return err
	}

	if bal > params.BeaconConfig().MinActivationBalance {
		excessBalance := bal - params.BeaconConfig().MinActivationBalance
		if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
			return err
		}
		return s.AppendPendingBalanceDeposit(idx, excessBalance)
	}
	return nil
}
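
In effect, any balance above MIN_ACTIVATION_BALANCE is moved off the liquid balance and into the pending-deposit queue. A small worked sketch, assuming mainnet's 32 ETH MinActivationBalance and a hypothetical state st whose index 0 holds 32 ETH plus 1000 Gwei (this is what TestQueueExcessActiveBalance_Ok later in this change asserts):

	err := QueueExcessActiveBalance(st, 0) // st: an assumed state.BeaconState at Electra
	// On success: st.Balances()[0] == MinActivationBalance and one PendingBalanceDeposit
	// of 1000 Gwei for index 0 has been appended.
	_ = err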

// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
//
// Spec definition:
//
//	def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
//	    balance = state.balances[index]
//	    validator = state.validators[index]
//	    state.balances[index] = 0
//	    validator.effective_balance = 0
//	    validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
//	    state.pending_balance_deposits.append(
//	        PendingBalanceDeposit(index=index, amount=balance)
//	    )
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
	bal, err := s.BalanceAtIndex(idx)
	if err != nil {
		return err
	}

	if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
		return err
	}

	v, err := s.ValidatorAtIndex(idx)
	if err != nil {
		return err
	}

	v.EffectiveBalance = 0
	v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
	if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
		return err
	}

	return s.AppendPendingBalanceDeposit(idx, bal)
}
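
By contrast with QueueExcessActiveBalance, this helper zeroes both the balance and the effective balance and pushes the full amount through the pending-deposit queue, so the validator is re-credited under the Electra accounting rules. A minimal call sketch, with st and idx as assumed placeholders:

	if err := QueueEntireBalanceAndResetValidator(st, idx); err != nil {
		// handle the error in the caller's usual way
	}
	// Afterwards: balances[idx] == 0, the validator's EffectiveBalance == 0, its
	// ActivationEligibilityEpoch == FarFutureEpoch, and one PendingBalanceDeposit
	// carrying the previous balance has been appended.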
|
||||
|
||||
@@ -16,9 +16,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestIsActiveValidator_OK(t *testing.T) {
|
||||
@@ -596,6 +596,7 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
validators []*ethpb.Validator
|
||||
indices []primitives.ValidatorIndex
|
||||
seed [32]byte
|
||||
minVersion int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -683,21 +684,81 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
},
|
||||
want: 7,
|
||||
},
|
||||
{
|
||||
name: "all_active_indices_above_min_activation_balance",
|
||||
args: args{
|
||||
validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{0, 1, 2, 3, 4},
|
||||
seed: seed,
|
||||
minVersion: version.Electra,
|
||||
},
|
||||
want: 2,
|
||||
},
|
||||
{
|
||||
name: "second_half_active_above_min_activation_balance",
|
||||
args: args{
|
||||
validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{5, 6, 7, 8, 9},
|
||||
seed: seed,
|
||||
minVersion: version.Electra,
|
||||
},
|
||||
want: 7,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
t.Run("phase0", func(t *testing.T) {
|
||||
if tt.args.minVersion > version.Phase0 {
|
||||
t.Skip("scenario does not apply to phase0")
|
||||
}
|
||||
helpers.ClearCache()
|
||||
bState := ðpb.BeaconState{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
|
||||
require.NoError(t, err)
|
||||
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
}
|
||||
assert.NoError(t, err, "received unexpected error")
|
||||
assert.Equal(t, tt.want, got, "ComputeProposerIndex()")
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
if tt.args.minVersion > version.Electra {
|
||||
t.Skip("scenario does not apply to electra")
|
||||
}
|
||||
helpers.ClearCache()
|
||||
bState := ðpb.BeaconStateElectra{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafeElectra(bState)
|
||||
require.NoError(t, err)
|
||||
|
||||
bState := ðpb.BeaconState{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
|
||||
require.NoError(t, err)
|
||||
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
}
|
||||
assert.NoError(t, err, "received unexpected error")
|
||||
assert.Equal(t, tt.want, got, "ComputeProposerIndex()")
|
||||
require.Equal(t, stTrie.Version(), version.Electra)
|
||||
|
||||
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
}
|
||||
assert.NoError(t, err, "received unexpected error")
|
||||
assert.Equal(t, tt.want, got, "ComputeProposerIndex()")
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -744,7 +805,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator, tt.currentEpoch), "IsEligibleForActivationQueue()")
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(v, tt.currentEpoch), "IsEligibleForActivationQueue()")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1120,40 +1183,3 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
|
||||
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
|
||||
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
err := helpers.QueueExcessActiveBalance(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
|
||||
bals = st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
|
||||
}
|
||||
|
||||
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance)
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pbd))
|
||||
err = helpers.QueueEntireBalanceAndResetValidator(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err = st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
|
||||
val, err = st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), val.EffectiveBalance)
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
@@ -71,7 +70,7 @@ func GenesisBeaconStateBellatrix(ctx context.Context, deposits []*ethpb.Deposit,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
st, err = b.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
st, err = altair.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator deposits")
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
@@ -69,7 +69,7 @@ func GenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit, genesisT
|
||||
return nil, err
|
||||
}
|
||||
|
||||
st, err = b.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
st, err = altair.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator deposits")
|
||||
}
|
||||
@@ -92,7 +92,7 @@ func PreminedGenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st, err = b.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
st, err = altair.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator deposits")
|
||||
}
|
||||
|
||||
@@ -432,12 +432,7 @@ func electraOperations(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := block.Body()
|
||||
bod, ok := b.(interfaces.ROBlockBodyElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast block body to electra block body")
|
||||
}
|
||||
e, err := bod.Execution()
|
||||
e, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution data from block")
|
||||
}
|
||||
@@ -455,9 +450,8 @@ func electraOperations(
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
}
|
||||
|
||||
if err := electra.ProcessConsolidations(ctx, st, bod.Consolidations()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process consolidations")
|
||||
}
|
||||
// TODO: Process consolidations from execution header.
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
@@ -505,7 +499,7 @@ func phase0Operations(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attestations")
|
||||
}
|
||||
if _, err := b.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposits")
|
||||
}
|
||||
return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
|
||||
|
||||
@@ -132,16 +132,6 @@ var blockTests = []struct {
|
||||
b.Block.Slot = slot
|
||||
if root != nil {
|
||||
b.Block.ParentRoot = root
|
||||
b.Block.Body.Consolidations = []*ethpb.SignedConsolidation{
|
||||
{
|
||||
Message: ðpb.Consolidation{
|
||||
SourceIndex: 1,
|
||||
TargetIndex: 2,
|
||||
Epoch: 3,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
},
|
||||
@@ -153,16 +143,6 @@ var blockTests = []struct {
|
||||
b.Message.Slot = slot
|
||||
if root != nil {
|
||||
b.Message.ParentRoot = root
|
||||
b.Message.Body.Consolidations = []*ethpb.SignedConsolidation{
|
||||
{
|
||||
Message: ðpb.Consolidation{
|
||||
SourceIndex: 1,
|
||||
TargetIndex: 2,
|
||||
Epoch: 3,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
}},
|
||||
|
||||
@@ -138,19 +138,20 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
p, err := blocks.WrappedExecutionPayloadHeaderElectra(&enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: []byte("foo"),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
DepositRequestsRoot: make([]byte, 32),
|
||||
WithdrawalRequestsRoot: make([]byte, 32),
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: []byte("foo"),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
DepositRequestsRoot: make([]byte, 32),
|
||||
WithdrawalRequestsRoot: make([]byte, 32),
|
||||
ConsolidationRequestsRoot: make([]byte, 32),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
|
||||
@@ -27,7 +27,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
@@ -28,7 +28,8 @@ func (s *Service) processDeposit(ctx context.Context, eth1Data *ethpb.Eth1Data,
|
||||
if err := s.preGenesisState.SetEth1Data(eth1Data); err != nil {
|
||||
return err
|
||||
}
|
||||
beaconState, err := blocks.ProcessPreGenesisDeposits(ctx, s.preGenesisState, []*ethpb.Deposit{deposit})
|
||||
// preGenesisState is always a phase 0 genesis state, so the state version does not need to be checked here for post-Electra deposit processing.
|
||||
beaconState, err := altair.ProcessPreGenesisDeposits(ctx, s.preGenesisState, []*ethpb.Deposit{deposit})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process pre-genesis deposits")
|
||||
}
|
||||
|
||||
@@ -69,7 +69,6 @@ go_library(
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -197,7 +196,3 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
func configureFastSSZHashingAlgorithm() {
|
||||
fastssz.EnableVectorizedHTR = true
|
||||
}
|
||||
|
||||
@@ -277,8 +277,6 @@ func configureBeacon(cliCtx *cli.Context) error {
|
||||
return errors.Wrap(err, "could not configure execution setting")
|
||||
}
|
||||
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,12 +21,13 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
|
||||
@@ -16,9 +16,10 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
@@ -39,14 +40,15 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -9,7 +9,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -32,28 +34,28 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
|
||||
defer span.End()
|
||||
|
||||
attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(unaggregatedAtts))
|
||||
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(unaggregatedAtts))
|
||||
for _, att := range unaggregatedAtts {
|
||||
attDataRoot, err := att.GetData().HashTreeRoot()
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
|
||||
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
|
||||
}
|
||||
|
||||
// Aggregate unaggregated attestations from the pool and save them in the pool.
|
||||
// Track the unaggregated attestations that aren't able to aggregate.
|
||||
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
|
||||
leftOverUnaggregatedAtt := make(map[attestation.Id]bool)
|
||||
|
||||
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
|
||||
leftOverUnaggregatedAtt = c.aggregateParallel(attsByVerAndDataRoot, leftOverUnaggregatedAtt)
|
||||
|
||||
// Remove the unaggregated attestations from the pool that were successfully aggregated.
|
||||
for _, att := range unaggregatedAtts {
|
||||
h, err := hashFn(att)
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
if leftOverUnaggregatedAtt[h] {
|
||||
if leftOverUnaggregatedAtt[id] {
|
||||
continue
|
||||
}
|
||||
if err := c.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
@@ -66,7 +68,7 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
|
||||
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
|
||||
// returns the unaggregated attestations that weren't able to aggregate.
|
||||
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
|
||||
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver map[[32]byte]bool) map[[32]byte]bool {
|
||||
func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftOver map[attestation.Id]bool) map[attestation.Id]bool {
|
||||
var leftoverLock sync.Mutex
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
@@ -92,13 +94,13 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver ma
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
h, err := hashFn(aggregated)
|
||||
id, err := attestation.NewId(aggregated, attestation.Full)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not hash attestation")
|
||||
log.WithError(err).Error("Could not create attestation ID")
|
||||
continue
|
||||
}
|
||||
leftoverLock.Lock()
|
||||
leftOver[h] = true
|
||||
leftOver[id] = true
|
||||
leftoverLock.Unlock()
|
||||
}
|
||||
}
|
||||
@@ -139,17 +141,18 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
r, err := hashFn(att.GetData())
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
copiedAtt := att.Copy()
|
||||
|
||||
c.aggregatedAttLock.Lock()
|
||||
defer c.aggregatedAttLock.Unlock()
|
||||
atts, ok := c.aggregatedAtt[r]
|
||||
atts, ok := c.aggregatedAtt[id]
|
||||
if !ok {
|
||||
atts := []ethpb.Att{copiedAtt}
|
||||
c.aggregatedAtt[r] = atts
|
||||
c.aggregatedAtt[id] = atts
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -157,7 +160,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.aggregatedAtt[r] = atts
|
||||
c.aggregatedAtt[id] = atts
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -191,17 +194,56 @@ func (c *AttCaches) AggregatedAttestations() []ethpb.Att {
|
||||
|
||||
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
|
||||
func (c *AttCaches) AggregatedAttestationsBySlotIndex(
|
||||
ctx context.Context,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) []*ethpb.Attestation {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]ethpb.Att, 0)
|
||||
atts := make([]*ethpb.Attestation, 0)
|
||||
|
||||
c.aggregatedAttLock.RLock()
|
||||
defer c.aggregatedAttLock.RUnlock()
|
||||
for _, a := range c.aggregatedAtt {
|
||||
if slot == a[0].GetData().Slot && committeeIndex == a[0].GetData().CommitteeIndex {
|
||||
atts = append(atts, a...)
|
||||
for _, as := range c.aggregatedAtt {
|
||||
if as[0].Version() == version.Phase0 && slot == as[0].GetData().Slot && committeeIndex == as[0].GetData().CommitteeIndex {
|
||||
for _, a := range as {
|
||||
att, ok := a.(*ethpb.Attestation)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if ok {
|
||||
atts = append(atts, att)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return atts
|
||||
}
|
||||
|
||||
// AggregatedAttestationsBySlotIndexElectra returns the aggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) AggregatedAttestationsBySlotIndexElectra(
|
||||
ctx context.Context,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) []*ethpb.AttestationElectra {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndexElectra")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]*ethpb.AttestationElectra, 0)
|
||||
|
||||
c.aggregatedAttLock.RLock()
|
||||
defer c.aggregatedAttLock.RUnlock()
|
||||
for _, as := range c.aggregatedAtt {
|
||||
if as[0].Version() == version.Electra && slot == as[0].GetData().Slot && as[0].CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
|
||||
for _, a := range as {
|
||||
att, ok := a.(*ethpb.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if ok {
|
||||
atts = append(atts, att)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
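
The retrieval API is now split per fork so callers receive concrete types rather than the ethpb.Att interface. A usage sketch, with pool as an assumed *AttCaches and ctx, slot, committeeIndex as placeholders:

	phase0Atts := pool.AggregatedAttestationsBySlotIndex(ctx, slot, committeeIndex)         // []*ethpb.Attestation
	electraAtts := pool.AggregatedAttestationsBySlotIndexElectra(ctx, slot, committeeIndex) // []*ethpb.AttestationElectra
	_, _ = phase0Atts, electraAtts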
|
||||
|
||||
@@ -216,18 +258,19 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
|
||||
if !helpers.IsAggregated(att) {
|
||||
return errors.New("attestation is not aggregated")
|
||||
}
|
||||
r, err := hashFn(att.GetData())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation data")
|
||||
}
|
||||
|
||||
if err := c.insertSeenBit(att); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.aggregatedAttLock.Lock()
|
||||
defer c.aggregatedAttLock.Unlock()
|
||||
attList, ok := c.aggregatedAtt[r]
|
||||
attList, ok := c.aggregatedAtt[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
@@ -241,9 +284,9 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
|
||||
}
|
||||
}
|
||||
if len(filtered) == 0 {
|
||||
delete(c.aggregatedAtt, r)
|
||||
delete(c.aggregatedAtt, id)
|
||||
} else {
|
||||
c.aggregatedAtt[r] = filtered
|
||||
c.aggregatedAtt[id] = filtered
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -254,14 +297,15 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return false, err
|
||||
}
|
||||
r, err := hashFn(att.GetData())
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not tree hash attestation")
|
||||
return false, errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.aggregatedAttLock.RLock()
|
||||
defer c.aggregatedAttLock.RUnlock()
|
||||
if atts, ok := c.aggregatedAtt[r]; ok {
|
||||
if atts, ok := c.aggregatedAtt[id]; ok {
|
||||
for _, a := range atts {
|
||||
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
|
||||
return false, err
|
||||
@@ -273,7 +317,7 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
|
||||
|
||||
c.blockAttLock.RLock()
|
||||
defer c.blockAttLock.RUnlock()
|
||||
if atts, ok := c.blockAtt[r]; ok {
|
||||
if atts, ok := c.blockAtt[id]; ok {
|
||||
for _, a := range atts {
|
||||
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
|
||||
return false, err
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
|
||||
c "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
fssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -69,7 +70,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b10111},
|
||||
},
|
||||
wantErrString: "could not tree hash attestation: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")",
|
||||
wantErrString: "could not create attestation ID",
|
||||
},
|
||||
{
|
||||
name: "already seen",
|
||||
@@ -92,15 +93,13 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
count: 1,
|
||||
},
|
||||
}
|
||||
r, err := hashFn(util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 100,
|
||||
}))
|
||||
id, err := attestation.NewId(util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100}}), attestation.Data)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
|
||||
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
|
||||
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))
|
||||
|
||||
err := cache.SaveAggregatedAttestation(tt.att)
|
||||
@@ -230,7 +229,7 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
}
|
||||
err := cache.DeleteAggregatedAttestation(att)
|
||||
wantErr := "could not tree hash attestation data: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")"
|
||||
wantErr := "could not create attestation ID"
|
||||
assert.ErrorContains(t, wantErr, err)
|
||||
})
|
||||
|
||||
@@ -500,3 +499,49 @@ func TestKV_Aggregated_DuplicateAggregatedAttestations(t *testing.T) {
|
||||
assert.DeepSSZEqual(t, att2, returned[0], "Did not receive correct aggregated atts")
|
||||
assert.Equal(t, 1, len(returned), "Did not receive correct aggregated atts")
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_AggregatedAttestationsBySlotIndex(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1011}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
require.NoError(t, cache.SaveAggregatedAttestation(att))
|
||||
}
|
||||
ctx := context.Background()
|
||||
returned := cache.AggregatedAttestationsBySlotIndex(ctx, 1, 1)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
|
||||
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
|
||||
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 2, 1)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_AggregatedAttestationsBySlotIndexElectra(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
committeeBits := primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(1, true)
|
||||
att1 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1011}, CommitteeBits: committeeBits})
|
||||
committeeBits = primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(2, true)
|
||||
att2 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
|
||||
committeeBits = primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(1, true)
|
||||
att3 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
|
||||
atts := []*ethpb.AttestationElectra{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
require.NoError(t, cache.SaveAggregatedAttestation(att))
|
||||
}
|
||||
ctx := context.Background()
|
||||
returned := cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
|
||||
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
|
||||
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package kv
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
)
|
||||
|
||||
// SaveBlockAttestation saves a block attestation in the cache.
|
||||
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
|
||||
if att == nil {
|
||||
return nil
|
||||
}
|
||||
r, err := hashFn(att.GetData())
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.blockAttLock.Lock()
|
||||
defer c.blockAttLock.Unlock()
|
||||
atts, ok := c.blockAtt[r]
|
||||
atts, ok := c.blockAtt[id]
|
||||
if !ok {
|
||||
atts = make([]ethpb.Att, 0, 1)
|
||||
}
|
||||
@@ -31,7 +33,7 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
|
||||
}
|
||||
}
|
||||
|
||||
c.blockAtt[r] = append(atts, att.Copy())
|
||||
c.blockAtt[id] = append(atts, att.Copy())
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -54,14 +56,15 @@ func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
|
||||
if att == nil {
|
||||
return nil
|
||||
}
|
||||
r, err := hashFn(att.GetData())
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.blockAttLock.Lock()
|
||||
defer c.blockAttLock.Unlock()
|
||||
delete(c.blockAtt, r)
|
||||
delete(c.blockAtt, id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package kv
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
)
|
||||
|
||||
// SaveForkchoiceAttestation saves a forkchoice attestation in the cache.
|
||||
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
|
||||
if att == nil {
|
||||
return nil
|
||||
}
|
||||
r, err := hashFn(att)
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.forkchoiceAttLock.Lock()
|
||||
defer c.forkchoiceAttLock.Unlock()
|
||||
c.forkchoiceAtt[r] = att.Copy()
|
||||
c.forkchoiceAtt[id] = att
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -51,14 +53,15 @@ func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
|
||||
if att == nil {
|
||||
return nil
|
||||
}
|
||||
r, err := hashFn(att)
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.forkchoiceAttLock.Lock()
|
||||
defer c.forkchoiceAttLock.Unlock()
|
||||
delete(c.forkchoiceAtt, r)
|
||||
delete(c.forkchoiceAtt, id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,24 +9,22 @@ import (

	"github.com/patrickmn/go-cache"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

var hashFn = hash.Proto

// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV stores for various attestations,
// such as unaggregated, aggregated or attestations within a block.
type AttCaches struct {
	aggregatedAttLock  sync.RWMutex
	aggregatedAtt      map[[32]byte][]ethpb.Att
	aggregatedAtt      map[attestation.Id][]ethpb.Att
	unAggregateAttLock sync.RWMutex
	unAggregatedAtt    map[[32]byte]ethpb.Att
	unAggregatedAtt    map[attestation.Id]ethpb.Att
	forkchoiceAttLock  sync.RWMutex
	forkchoiceAtt      map[[32]byte]ethpb.Att
	forkchoiceAtt      map[attestation.Id]ethpb.Att
	blockAttLock       sync.RWMutex
	blockAtt           map[[32]byte][]ethpb.Att
	blockAtt           map[attestation.Id][]ethpb.Att
	seenAtt            *cache.Cache
}
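
The map keys above switch from raw [32]byte hash-tree roots to attestation.Id values. Based on how the IDs are constructed throughout this change, attestation.Data appears to identify attestations that share the same attestation data, while attestation.Full appears to identify one exact attestation, aggregation bits included. A minimal sketch of the two keying modes, with att and c as assumed placeholders:

	dataID, err := attestation.NewId(att, attestation.Data) // groups attestations with identical data
	if err != nil {
		return errors.Wrap(err, "could not create attestation ID")
	}
	fullID, err := attestation.NewId(att, attestation.Full) // pins down this exact attestation
	if err != nil {
		return errors.Wrap(err, "could not create attestation ID")
	}
	c.aggregatedAtt[dataID] = append(c.aggregatedAtt[dataID], att.Copy())
	c.forkchoiceAtt[fullID] = att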
|
||||
|
||||
@@ -34,12 +32,12 @@ type AttCaches struct {
|
||||
// various kind of attestations.
|
||||
func NewAttCaches() *AttCaches {
|
||||
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
c := cache.New(secsInEpoch*time.Second, 2*secsInEpoch*time.Second)
|
||||
c := cache.New(2*secsInEpoch*time.Second, 2*secsInEpoch*time.Second)
|
||||
pool := &AttCaches{
|
||||
unAggregatedAtt: make(map[[32]byte]ethpb.Att),
|
||||
aggregatedAtt: make(map[[32]byte][]ethpb.Att),
|
||||
forkchoiceAtt: make(map[[32]byte]ethpb.Att),
|
||||
blockAtt: make(map[[32]byte][]ethpb.Att),
|
||||
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
|
||||
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
|
||||
forkchoiceAtt: make(map[attestation.Id]ethpb.Att),
|
||||
blockAtt: make(map[attestation.Id][]ethpb.Att),
|
||||
seenAtt: c,
|
||||
}
|
||||
|
||||
|
||||
@@ -1,21 +1,20 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
)
|
||||
|
||||
func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
|
||||
r, err := hashFn(att.GetData())
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
v, ok := c.seenAtt.Get(string(r[:]))
|
||||
v, ok := c.seenAtt.Get(id.String())
|
||||
if ok {
|
||||
seenBits, ok := v.([]bitfield.Bitlist)
|
||||
if !ok {
|
||||
@@ -24,7 +23,7 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
|
||||
alreadyExists := false
|
||||
for _, bit := range seenBits {
|
||||
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
|
||||
return fmt.Errorf("failed to check seen bits on attestation when inserting bit: %w", err)
|
||||
return err
|
||||
} else if c {
|
||||
alreadyExists = true
|
||||
break
|
||||
@@ -33,21 +32,21 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
|
||||
if !alreadyExists {
|
||||
seenBits = append(seenBits, att.GetAggregationBits())
|
||||
}
|
||||
c.seenAtt.Set(string(r[:]), seenBits, cache.DefaultExpiration /* one epoch */)
|
||||
c.seenAtt.Set(id.String(), seenBits, cache.DefaultExpiration /* two epochs */)
|
||||
return nil
|
||||
}
|
||||
|
||||
c.seenAtt.Set(string(r[:]), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* one epoch */)
|
||||
c.seenAtt.Set(id.String(), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* two epochs */)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
|
||||
r, err := hashFn(att.GetData())
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
v, ok := c.seenAtt.Get(string(r[:]))
|
||||
v, ok := c.seenAtt.Get(id.String())
|
||||
if ok {
|
||||
seenBits, ok := v.([]bitfield.Bitlist)
|
||||
if !ok {
|
||||
@@ -55,7 +54,7 @@ func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
|
||||
}
|
||||
for _, bit := range seenBits {
|
||||
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
|
||||
return false, fmt.Errorf("failed to check seen bits on attestation when reading bit: %w", err)
|
||||
return false, err
|
||||
} else if c {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
@@ -39,18 +40,18 @@ func TestAttCaches_hasSeenBit(t *testing.T) {
|
||||
func TestAttCaches_insertSeenBitDuplicates(t *testing.T) {
|
||||
c := NewAttCaches()
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
|
||||
r, err := hashFn(att1.Data)
|
||||
id, err := attestation.NewId(att1, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.insertSeenBit(att1))
|
||||
require.Equal(t, 1, c.seenAtt.ItemCount())
|
||||
|
||||
_, expirationTime1, ok := c.seenAtt.GetWithExpiration(string(r[:]))
|
||||
_, expirationTime1, ok := c.seenAtt.GetWithExpiration(id.String())
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// Make sure that duplicates are not inserted, but expiration time gets updated.
|
||||
require.NoError(t, c.insertSeenBit(att1))
|
||||
require.Equal(t, 1, c.seenAtt.ItemCount())
|
||||
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(string(r[:]))
|
||||
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(id.String())
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, expirationprysmTime.After(expirationTime1), "Expiration time is not updated")
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -27,13 +29,14 @@ func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
r, err := hashFn(att)
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.unAggregateAttLock.Lock()
|
||||
defer c.unAggregateAttLock.Unlock()
|
||||
c.unAggregatedAtt[r] = att.Copy()
|
||||
c.unAggregatedAtt[id] = att
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -69,19 +72,56 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
|
||||
|
||||
// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
|
||||
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(
|
||||
ctx context.Context,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) []*ethpb.Attestation {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]ethpb.Att, 0)
|
||||
atts := make([]*ethpb.Attestation, 0)
|
||||
|
||||
c.unAggregateAttLock.RLock()
|
||||
defer c.unAggregateAttLock.RUnlock()
|
||||
|
||||
unAggregatedAtts := c.unAggregatedAtt
|
||||
for _, a := range unAggregatedAtts {
|
||||
if slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
|
||||
atts = append(atts, a)
|
||||
if a.Version() == version.Phase0 && slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
|
||||
att, ok := a.(*ethpb.Attestation)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if ok {
|
||||
atts = append(atts, att)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return atts
|
||||
}
|
||||
|
||||
// UnaggregatedAttestationsBySlotIndexElectra returns the unaggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(
|
||||
ctx context.Context,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) []*ethpb.AttestationElectra {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndexElectra")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]*ethpb.AttestationElectra, 0)
|
||||
|
||||
c.unAggregateAttLock.RLock()
|
||||
defer c.unAggregateAttLock.RUnlock()
|
||||
|
||||
unAggregatedAtts := c.unAggregatedAtt
|
||||
for _, a := range unAggregatedAtts {
|
||||
if a.Version() == version.Electra && slot == a.GetData().Slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
|
||||
att, ok := a.(*ethpb.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if ok {
|
||||
atts = append(atts, att)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,14 +141,14 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := hashFn(att)
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash attestation")
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
|
||||
c.unAggregateAttLock.Lock()
|
||||
defer c.unAggregateAttLock.Unlock()
|
||||
delete(c.unAggregatedAtt, r)
|
||||
delete(c.unAggregatedAtt, id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
"testing"
|
||||
|
||||
c "github.com/patrickmn/go-cache"
|
||||
fssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -39,7 +40,7 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
BeaconBlockRoot: []byte{0b0},
|
||||
},
|
||||
},
|
||||
wantErrString: fssz.ErrBytesLength.Error(),
|
||||
wantErrString: "could not create attestation ID",
|
||||
},
|
||||
{
|
||||
name: "normal save",
|
||||
@@ -57,13 +58,13 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
count: 0,
|
||||
},
|
||||
}
|
||||
r, err := hashFn(util.HydrateAttestationData(ðpb.AttestationData{Slot: 100}))
|
||||
id, err := attestation.NewId(util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100}}), attestation.Data)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
|
||||
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
|
||||
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))
|
||||
|
||||
if tt.att != nil && tt.att.GetSignature() == nil {
|
||||
@@ -246,9 +247,35 @@ func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
|
||||
}
|
||||
ctx := context.Background()
|
||||
returned := cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 1)
|
||||
assert.DeepEqual(t, []ethpb.Att{att1}, returned)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
|
||||
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)
|
||||
assert.DeepEqual(t, []ethpb.Att{att2}, returned)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
|
||||
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 1)
|
||||
assert.DeepEqual(t, []ethpb.Att{att3}, returned)
|
||||
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndexElectra(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
committeeBits := primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(1, true)
|
||||
att1 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}, CommitteeBits: committeeBits})
|
||||
committeeBits = primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(2, true)
|
||||
att2 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
|
||||
committeeBits = primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(1, true)
|
||||
att3 := util.HydrateAttestationElectra(ðpb.AttestationElectra{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
|
||||
atts := []*ethpb.AttestationElectra{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
|
||||
}
|
||||
ctx := context.Background()
|
||||
returned := cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
|
||||
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
|
||||
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
|
||||
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
|
||||
}
|
||||
|
||||
@@ -18,7 +18,8 @@ type Pool interface {
|
||||
SaveAggregatedAttestation(att ethpb.Att) error
|
||||
SaveAggregatedAttestations(atts []ethpb.Att) error
|
||||
AggregatedAttestations() []ethpb.Att
|
||||
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
|
||||
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
|
||||
AggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
|
||||
DeleteAggregatedAttestation(att ethpb.Att) error
|
||||
HasAggregatedAttestation(att ethpb.Att) (bool, error)
|
||||
AggregatedAttestationCount() int
|
||||
@@ -26,7 +27,8 @@ type Pool interface {
|
||||
SaveUnaggregatedAttestation(att ethpb.Att) error
|
||||
SaveUnaggregatedAttestations(atts []ethpb.Att) error
|
||||
UnaggregatedAttestations() ([]ethpb.Att, error)
|
||||
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
|
||||
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
|
||||
UnaggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
|
||||
DeleteUnaggregatedAttestation(att ethpb.Att) error
|
||||
DeleteSeenUnaggregatedAttestations() (int, error)
|
||||
UnaggregatedAttestationCount() int
|
||||
|
||||
@@ -3,14 +3,14 @@ package attestations
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -67,7 +67,7 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
|
||||
atts := append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
|
||||
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
|
||||
|
||||
attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(atts))
|
||||
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(atts))
|
||||
|
||||
// Consolidate attestations by aggregating them by similar data root.
|
||||
for _, att := range atts {
|
||||
@@ -79,14 +79,14 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
attDataRoot, err := att.GetData().HashTreeRoot()
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not create attestation ID")
|
||||
}
|
||||
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
|
||||
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
|
||||
}
|
||||
|
||||
for _, atts := range attsByDataRoot {
|
||||
for _, atts := range attsByVerAndDataRoot {
|
||||
if err := s.aggregateAndSaveForkChoiceAtts(atts); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -119,12 +119,12 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []ethpb.Att) error {
// This checks if the attestation has previously been aggregated for fork choice
// return true if yes, false if no.
func (s *Service) seen(att ethpb.Att) (bool, error) {
attRoot, err := hash.Proto(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, err
return false, errors.Wrap(err, "could not create attestation ID")
}
incomingBits := att.GetAggregationBits()
savedBits, ok := s.forkChoiceProcessedRoots.Get(attRoot)
savedBits, ok := s.forkChoiceProcessedAtts.Get(id)
if ok {
savedBitlist, ok := savedBits.(bitfield.Bitlist)
if !ok {
@@ -149,6 +149,6 @@ func (s *Service) seen(att ethpb.Att) (bool, error) {
}
}

s.forkChoiceProcessedRoots.Add(attRoot, incomingBits)
s.forkChoiceProcessedAtts.Add(id, incomingBits)
return false, nil
}

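The middle of seen() is elided by this hunk; conceptually it compares the incoming aggregation bits against the bits already cached under the same data-scoped ID. A rough sketch of that idea only, assuming the hashicorp LRU returned by lruwrpr and the go-bitfield Contains check (the containment logic below is an assumption, not copied from the elided lines):

// seenBefore reports whether every aggregation bit of the incoming attestation
// was already recorded under its ID, and records the incoming bits otherwise.
func seenBefore(cache *lru.Cache, id attestation.Id, incoming bitfield.Bitlist) (bool, error) {
	saved, ok := cache.Get(id)
	if ok {
		savedBits, ok := saved.(bitfield.Bitlist)
		if !ok {
			return false, errors.New("cached value is not a bitlist")
		}
		contained, err := savedBits.Contains(incoming)
		if err != nil {
			return false, err
		}
		if contained {
			return true, nil
		}
	}
	cache.Add(id, incoming)
	return false, nil
}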
@@ -13,16 +13,16 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
var forkChoiceProcessedRootsSize = 1 << 16
|
||||
var forkChoiceProcessedAttsSize = 1 << 16
|
||||
|
||||
// Service of attestation pool operations.
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
err error
|
||||
forkChoiceProcessedRoots *lru.Cache
|
||||
genesisTime uint64
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
err error
|
||||
forkChoiceProcessedAtts *lru.Cache
|
||||
genesisTime uint64
|
||||
}
|
||||
|
||||
// Config options for the service.
|
||||
@@ -35,7 +35,7 @@ type Config struct {
|
||||
// NewService instantiates a new attestation pool service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
cache := lruwrpr.New(forkChoiceProcessedRootsSize)
|
||||
cache := lruwrpr.New(forkChoiceProcessedAttsSize)
|
||||
|
||||
if cfg.pruneInterval == 0 {
|
||||
// Prune expired attestations from the pool every slot interval.
|
||||
@@ -44,10 +44,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
forkChoiceProcessedRoots: cache,
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
forkChoiceProcessedAtts: cache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -336,7 +336,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with public filter
|
||||
// test with public filter
|
||||
cidr := "public"
|
||||
ip := "212.67.10.122"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
|
||||
@@ -348,7 +348,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
t.Errorf("Expected multiaddress with ip %s to not be rejected since we allow public addresses", ip)
|
||||
}
|
||||
|
||||
ip = "192.168.1.0" //this is private and should fail
|
||||
ip = "192.168.1.0" // this is private and should fail
|
||||
multiAddress, err = ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
|
||||
require.NoError(t, err)
|
||||
valid = s.InterceptAddrDial("", multiAddress)
|
||||
@@ -356,7 +356,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
t.Errorf("Expected multiaddress with ip %s to be rejected since we are only allowing public addresses", ip)
|
||||
}
|
||||
|
||||
//test with public allow filter, with a public address added to the deny list
|
||||
// test with public allow filter, with a public address added to the deny list
|
||||
invalidPublicIp := "212.67.10.122"
|
||||
validPublicIp := "91.65.69.69"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: "public", DenyListCIDR: []string{"212.67.89.112/16"}})
|
||||
@@ -384,7 +384,7 @@ func TestService_InterceptAddrDial_Private(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "private"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
|
||||
require.NoError(t, err)
|
||||
@@ -413,7 +413,7 @@ func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "private"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
|
||||
require.NoError(t, err)
|
||||
@@ -442,7 +442,7 @@ func TestService_InterceptAddrDial_DenyPublic(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "public"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
|
||||
require.NoError(t, err)
|
||||
@@ -471,7 +471,7 @@ func TestService_InterceptAddrDial_AllowConflict(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "public"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr, "192.168.0.0/16"}})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -51,7 +51,7 @@ func (quicProtocol) ENRKey() string { return "quic" }
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// return early if discv5 isnt running
|
||||
// return early if discv5 isn't running
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -112,8 +112,6 @@ type HistoricalSummaryCreator struct{}
|
||||
type BlobIdentifierCreator struct{}
|
||||
type PendingBalanceDepositCreator struct{}
|
||||
type PendingPartialWithdrawalCreator struct{}
|
||||
type ConsolidationCreator struct{}
|
||||
type SignedConsolidationCreator struct{}
|
||||
type PendingConsolidationCreator struct{}
|
||||
type StatusCreator struct{}
|
||||
type BeaconBlocksByRangeRequestCreator struct{}
|
||||
@@ -287,10 +285,6 @@ func (PendingBalanceDepositCreator) Create() MarshalerProtoMessage {
|
||||
func (PendingPartialWithdrawalCreator) Create() MarshalerProtoMessage {
|
||||
return ðpb.PendingPartialWithdrawal{}
|
||||
}
|
||||
func (ConsolidationCreator) Create() MarshalerProtoMessage { return ðpb.Consolidation{} }
|
||||
func (SignedConsolidationCreator) Create() MarshalerProtoMessage {
|
||||
return ðpb.SignedConsolidation{}
|
||||
}
|
||||
func (PendingConsolidationCreator) Create() MarshalerProtoMessage {
|
||||
return ðpb.PendingConsolidation{}
|
||||
}
|
||||
@@ -405,8 +399,6 @@ var creators = []MarshalerProtoCreator{
|
||||
BlobIdentifierCreator{},
|
||||
PendingBalanceDepositCreator{},
|
||||
PendingPartialWithdrawalCreator{},
|
||||
ConsolidationCreator{},
|
||||
SignedConsolidationCreator{},
|
||||
PendingConsolidationCreator{},
|
||||
StatusCreator{},
|
||||
BeaconBlocksByRangeRequestCreator{},
|
||||
@@ -553,26 +545,6 @@ func TestSszNetworkEncoder_RoundTrip_SignedVoluntaryExit(t *testing.T) {
|
||||
assertProtoMessagesEqual(t, decoded, msg)
|
||||
}
|
||||
|
||||
func TestSszNetworkEncoder_RoundTrip_Consolidation(t *testing.T) {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
data := []byte("\xc800")
|
||||
msg := ðpb.Consolidation{}
|
||||
|
||||
if err := proto.Unmarshal(data, msg); err != nil {
|
||||
t.Logf("Failed to unmarshal: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err := e.EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
decoded := ðpb.Consolidation{}
|
||||
require.NoError(t, e.DecodeGossip(buf.Bytes(), decoded))
|
||||
|
||||
assertProtoMessagesEqual(t, decoded, msg)
|
||||
}
|
||||
|
||||
func TestSszNetworkEncoder_RoundTrip(t *testing.T) {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
testRoundTripWithLength(t, e)
|
||||
|
||||
@@ -27,7 +27,8 @@ var gossipTopicMappings = map[string]proto.Message{
// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat {
switch topic {
case BlockSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedBeaconBlockElectra{}
}
@@ -43,8 +44,25 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if epoch >= params.BeaconConfig().AltairForkEpoch {
return &ethpb.SignedBeaconBlockAltair{}
}
return gossipTopicMappings[topic]
case AttestationSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttestationElectra{}
}
return gossipTopicMappings[topic]
case AttesterSlashingSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttesterSlashingElectra{}
}
return gossipTopicMappings[topic]
case AggregateAndProofSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedAggregateAttestationAndProofElectra{}
}
return gossipTopicMappings[topic]
default:
return gossipTopicMappings[topic]
}
return gossipTopicMappings[topic]
}

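The switch keeps the pre-Electra behavior for every topic (falling back to the static gossipTopicMappings map) and only overrides the concrete type at or after ElectraForkEpoch. A quick illustrative check, mirroring the updated test further down (call site is illustrative, not part of the diff):

// Before Electra an attestation subnet message decodes into *ethpb.Attestation,
// afterwards into *ethpb.AttestationElectra.
msg := GossipTopicMappings(AttestationSubnetTopicFormat, 0)
_, ok := msg.(*ethpb.Attestation) // ok == true
msg = GossipTopicMappings(AttestationSubnetTopicFormat, params.BeaconConfig().ElectraForkEpoch)
_, ok = msg.(*ethpb.AttestationElectra) // ok == true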
// AllTopics returns all topics stored in our
|
||||
@@ -75,4 +93,7 @@ func init() {
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
|
||||
// Specially handle Electra objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.AttestationElectra{})] = AttestationSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
|
||||
}
|
||||
|
||||
@@ -22,20 +22,20 @@ func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
altairForkEpoch := primitives.Epoch(100)
|
||||
BellatrixForkEpoch := primitives.Epoch(200)
|
||||
CapellaForkEpoch := primitives.Epoch(300)
|
||||
DenebForkEpoch := primitives.Epoch(400)
|
||||
ElectraForkEpoch := primitives.Epoch(500)
|
||||
bellatrixForkEpoch := primitives.Epoch(200)
|
||||
capellaForkEpoch := primitives.Epoch(300)
|
||||
denebForkEpoch := primitives.Epoch(400)
|
||||
electraForkEpoch := primitives.Epoch(500)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
bCfg.BellatrixForkEpoch = BellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = CapellaForkEpoch
|
||||
bCfg.DenebForkEpoch = DenebForkEpoch
|
||||
bCfg.ElectraForkEpoch = ElectraForkEpoch
|
||||
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = capellaForkEpoch
|
||||
bCfg.DenebForkEpoch = denebForkEpoch
|
||||
bCfg.ElectraForkEpoch = electraForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
|
||||
@@ -47,29 +47,83 @@ func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
|
||||
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Altair Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Bellatrix Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, BellatrixForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Capella Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, CapellaForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Deneb Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, DenebForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Electra Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, ElectraForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttestationElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -2,14 +2,10 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
const backOffCounter = 50
|
||||
|
||||
// filterNodes wraps an iterator such that Next only returns nodes for which
|
||||
// the 'check' function returns true. This custom implementation also
|
||||
// checks for context deadlines so that in the event the parent context has
|
||||
@@ -28,21 +24,13 @@ type filterIter struct {
|
||||
// Next looks up for the next valid node according to our
|
||||
// filter criteria.
|
||||
func (f *filterIter) Next() bool {
|
||||
lookupCounter := 0
|
||||
for f.Iterator.Next() {
|
||||
// Do not excessively perform lookups if we constantly receive non-viable peers.
|
||||
if lookupCounter > backOffCounter {
|
||||
lookupCounter = 0
|
||||
runtime.Gosched()
|
||||
time.Sleep(pollingPeriod)
|
||||
}
|
||||
if f.Context.Err() != nil {
|
||||
return false
|
||||
}
|
||||
if f.check(f.Node()) {
|
||||
return true
|
||||
}
|
||||
lookupCounter++
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -81,7 +81,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
bootNodeENR := bootListener.Self().String()
|
||||
|
||||
// Create 3 nodes, each subscribed to a different subnet.
|
||||
// Each node is connected to the boostrap node.
|
||||
// Each node is connected to the bootstrap node.
|
||||
services := make([]*Service, 0, 3)
|
||||
|
||||
for i := 1; i <= 3; i++ {
|
||||
|
||||
@@ -43,6 +43,7 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -26,7 +26,13 @@ var (
|
||||
BlockMap map[[4]byte]func() (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
// MetaDataMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
MetaDataMap map[[4]byte]func() metadata.Metadata
|
||||
MetaDataMap map[[4]byte]func() (metadata.Metadata, error)
|
||||
// AttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AttestationMap map[[4]byte]func() (ethpb.Att, error)
|
||||
// AggregateAttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
|
||||
)
|
||||
|
||||
// InitializeDataMaps initializes all the relevant object maps. This function is called to
|
||||
@@ -67,24 +73,68 @@ func InitializeDataMaps() {
|
||||
}
|
||||
|
||||
// Reset our metadata map.
|
||||
MetaDataMap = map[[4]byte]func() metadata.Metadata{
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{})
|
||||
MetaDataMap = map[[4]byte]func() (metadata.Metadata, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our attestation map.
|
||||
AttestationMap = map[[4]byte]func() (ethpb.Att, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.AttestationElectra{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our aggregate attestation map.
|
||||
AggregateAttestationMap = map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProofElectra{}, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
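These maps are keyed by the 4-byte fork version, so a consumer resolves the constructor with bytesutil.ToBytes4 and then invokes it, checking the error the new signatures allow. A minimal sketch from a caller outside the types package, assuming InitializeDataMaps has already run (the helper name is illustrative):

// attestationForFork returns an empty attestation of the correct type for a fork version.
func attestationForFork(forkVersion []byte) (ethpb.Att, error) {
	f, ok := types.AttestationMap[bytesutil.ToBytes4(forkVersion)]
	if !ok {
		return nil, fmt.Errorf("no attestation type registered for fork version %#x", forkVersion)
	}
	return f()
}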
|
||||
@@ -5,7 +5,9 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestInitializeDataMaps(t *testing.T) {
|
||||
@@ -44,8 +46,36 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.action()
|
||||
_, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
bFunc, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
b, err := bFunc()
|
||||
require.NoError(t, err)
|
||||
generic, err := b.PbGenericBlock()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, generic.GetPhase0())
|
||||
}
|
||||
mdFunc, ok := MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
md, err := mdFunc()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, md.MetadataObjV0())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
attFunc, ok := AttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
att, err := attFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, att.Version())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
aggFunc, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
agg, err := aggFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, agg.Version())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ func (s *Service) endpoints(
|
||||
endpoints = append(endpoints, s.eventsEndpoints()...)
|
||||
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
|
||||
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
|
||||
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
|
||||
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService)...)
|
||||
if enableDebug {
|
||||
endpoints = append(endpoints, s.debugEndpoints(stater)...)
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
func (*Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
server := &blob.Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
@@ -777,7 +777,7 @@ func (s *Service) beaconEndpoints(
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) configEndpoints() []endpoint {
|
||||
func (*Service) configEndpoints() []endpoint {
|
||||
const namespace = "config"
|
||||
return []endpoint{
|
||||
{
|
||||
@@ -1045,7 +1045,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
|
||||
func (*Service) prysmValidatorEndpoints(coreService *core.Service) []endpoint {
|
||||
server := &validatorprysm.Server{
|
||||
CoreService: coreService,
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"handlers_block.go",
|
||||
"log.go",
|
||||
"server.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator",
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/network/httputil"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -592,7 +592,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
|
||||
if len(validatorIndices) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"validatorIndices": validatorIndices,
|
||||
}).Info("Updated fee recipient addresses")
|
||||
}
|
||||
|
||||
@@ -220,8 +220,9 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
|
||||
consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
|
||||
if httpError != nil {
|
||||
httputil.WriteError(w, httpError)
|
||||
return
|
||||
log.WithError(httpError).Debug("Failed to get consensus block value")
|
||||
// Having the consensus block value is not critical to block production
|
||||
consensusBlockValue = ""
|
||||
}
|
||||
|
||||
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
|
||||
@@ -297,7 +298,7 @@ func getConsensusBlockValue(ctx context.Context, blockRewardsFetcher rewards.Blo
|
||||
}
|
||||
}
|
||||
if bb.Version() == version.Phase0 {
|
||||
// ignore for phase 0
|
||||
// Getting the block value for Phase 0 is very hard, so we ignore it
|
||||
return "", nil
|
||||
}
|
||||
// Get consensus payload value which is the same as the total from the block rewards api.
|
||||
|
||||
5
beacon-chain/rpc/eth/validator/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package validator
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "beacon-api")
|
||||
@@ -2,7 +2,6 @@ package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
|
||||
@@ -100,12 +99,8 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
|
||||
best = aggregatedAtt
|
||||
}
|
||||
}
|
||||
att, ok := best.(*ethpb.Attestation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("best attestation has wrong type (expected %T, got %T)", ðpb.Attestation{}, best)
|
||||
}
|
||||
a := &ethpb.AggregateAttestationAndProof{
|
||||
Aggregate: att,
|
||||
Aggregate: best,
|
||||
SelectionProof: req.SlotSignature,
|
||||
AggregatorIndex: validatorIndex,
|
||||
}
|
||||
|
||||
@@ -25,16 +25,6 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
// Test for Electra version
|
||||
t.Run("electra block", func(t *testing.T) {
|
||||
eb := util.NewBeaconBlockElectra()
|
||||
eb.Block.Body.Consolidations = []*eth.SignedConsolidation{
|
||||
{
|
||||
Signature: make([]byte, 96),
|
||||
Message: ð.Consolidation{
|
||||
SourceIndex: 1,
|
||||
TargetIndex: 2,
|
||||
Epoch: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := blocks.NewSignedBeaconBlock(eb)
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
|
||||
@@ -592,16 +592,6 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
Consolidations: []*ethpb.SignedConsolidation{
|
||||
{
|
||||
Message: ðpb.Consolidation{
|
||||
SourceIndex: 1,
|
||||
TargetIndex: 2,
|
||||
Epoch: 3,
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig"), 96),
|
||||
},
|
||||
},
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
@@ -644,18 +634,26 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
|
||||
Amount: 789,
|
||||
},
|
||||
}
|
||||
cr := []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: bytesutil.PadTo([]byte("sa"), 20),
|
||||
SourcePubkey: bytesutil.PadTo(privKeys[1].PublicKey().Marshal(), 48),
|
||||
TargetPubkey: bytesutil.PadTo(privKeys[2].PublicKey().Marshal(), 48),
|
||||
},
|
||||
}
|
||||
payload := &enginev1.ExecutionPayloadElectra{
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
DepositRequests: dr,
|
||||
WithdrawalRequests: wr,
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
DepositRequests: dr,
|
||||
WithdrawalRequests: wr,
|
||||
ConsolidationRequests: cr,
|
||||
}
|
||||
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
|
||||
@@ -100,14 +100,13 @@ func (vs *Server) WaitForActivation(req *ethpb.ValidatorActivationRequest, stream
}

waitTime := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
timer := time.NewTimer(waitTime)
defer timer.Stop()
ticker := time.NewTicker(waitTime)
defer ticker.Stop()

for {
timer.Reset(waitTime)
select {
// Pinging every slot for activation.
case <-timer.C:
case <-ticker.C:
activeValidatorExists, validatorStatuses, err := vs.activationStatus(stream.Context(), req.PublicKeys)
if err != nil {
return status.Errorf(codes.Internal, "Could not fetch validator status: %v", err)

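Swapping the manually reset Timer for a Ticker keeps the once-per-slot polling while dropping the Reset call on every iteration. A self-contained illustration of the pattern (the 12-second period assumes mainnet SecondsPerSlot and the helper is illustrative, not from this diff):

// pollActivation calls poll once per slot until the context is cancelled or poll fails.
func pollActivation(ctx context.Context, poll func() error) error {
	ticker := time.NewTicker(12 * time.Second) // one mainnet slot
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := poll(); err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}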
@@ -4,7 +4,6 @@ package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
@@ -154,7 +153,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
connectedRPCClients: make(map[net.Addr]bool),
}

address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
address := net.JoinHostPort(s.cfg.Host, s.cfg.Port)
lis, err := net.Listen("tcp", address)
if err != nil {
log.WithError(err).Errorf("Could not listen to port in Start() %s", address)

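net.JoinHostPort is preferred over fmt.Sprintf here because it brackets IPv6 literals, which plain string formatting does not:

fmt.Sprintf("%s:%s", "::1", "4000") // "::1:4000" — ambiguous for net.Listen
net.JoinHostPort("::1", "4000")     // "[::1]:4000"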
@@ -22,7 +22,7 @@ const (
|
||||
// Receive indexed attestations from some source event feed,
|
||||
// validating their integrity before appending them to an attestation queue
|
||||
// for batch processing in a separate routine.
|
||||
func (s *Service) receiveAttestations(ctx context.Context, indexedAttsChan chan ethpb.IndexedAtt) {
|
||||
func (s *Service) receiveAttestations(ctx context.Context, indexedAttsChan chan *slashertypes.WrappedIndexedAtt) {
|
||||
defer s.wg.Done()
|
||||
|
||||
sub := s.serviceCfg.IndexedAttestationsFeed.Subscribe(indexedAttsChan)
|
||||
@@ -39,7 +39,7 @@ func (s *Service) receiveAttestations(ctx context.Context, indexedAttsChan chan
|
||||
continue
|
||||
}
|
||||
attWrapper := &slashertypes.IndexedAttestationWrapper{
|
||||
IndexedAttestation: att,
|
||||
IndexedAttestation: att.IndexedAtt,
|
||||
DataRoot: dataRoot,
|
||||
}
|
||||
s.attsQueue.push(attWrapper)
|
||||
@@ -141,7 +141,7 @@ func (s *Service) processAttestations(
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Check for attestatinos slashings (double, sourrounding, surrounded votes).
|
||||
// Check for attestations slashings (double, surrounding, surrounded votes).
|
||||
slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
|
||||
if err != nil {
|
||||
log.WithError(err).Error(couldNotCheckSlashableAtt)
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) {
|
||||
},
|
||||
attsQueue: newAttestationsQueue(),
|
||||
}
|
||||
indexedAttsChan := make(chan ethpb.IndexedAtt)
|
||||
indexedAttsChan := make(chan *slashertypes.WrappedIndexedAtt)
|
||||
defer close(indexedAttsChan)
|
||||
|
||||
s.wg.Add(1)
|
||||
@@ -40,13 +40,15 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) {
|
||||
secondIndices := []uint64{4, 5, 6}
|
||||
att1 := createAttestationWrapperEmptySig(t, 1, 2, firstIndices, nil)
|
||||
att2 := createAttestationWrapperEmptySig(t, 1, 2, secondIndices, nil)
|
||||
indexedAttsChan <- att1.IndexedAttestation
|
||||
indexedAttsChan <- att2.IndexedAttestation
|
||||
wrappedAtt1 := &slashertypes.WrappedIndexedAtt{IndexedAtt: att1.IndexedAttestation}
|
||||
wrappedAtt2 := &slashertypes.WrappedIndexedAtt{IndexedAtt: att2.IndexedAttestation}
|
||||
indexedAttsChan <- wrappedAtt1
|
||||
indexedAttsChan <- wrappedAtt2
|
||||
cancel()
|
||||
s.wg.Wait()
|
||||
wanted := []*slashertypes.IndexedAttestationWrapper{
|
||||
att1,
|
||||
att2,
|
||||
{IndexedAttestation: att1.IndexedAttestation, DataRoot: att1.DataRoot},
|
||||
{IndexedAttestation: att2.IndexedAttestation, DataRoot: att2.DataRoot},
|
||||
}
|
||||
require.DeepEqual(t, wanted, s.attsQueue.dequeue())
|
||||
}
|
||||
@@ -212,7 +214,7 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) {
|
||||
},
|
||||
attsQueue: newAttestationsQueue(),
|
||||
}
|
||||
indexedAttsChan := make(chan ethpb.IndexedAtt)
|
||||
indexedAttsChan := make(chan *slashertypes.WrappedIndexedAtt)
|
||||
defer close(indexedAttsChan)
|
||||
|
||||
s.wg.Add(1)
|
||||
@@ -223,18 +225,21 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) {
|
||||
secondIndices := []uint64{4, 5, 6}
|
||||
// Add a valid attestation.
|
||||
validAtt := createAttestationWrapperEmptySig(t, 1, 2, firstIndices, nil)
|
||||
indexedAttsChan <- validAtt.IndexedAttestation
|
||||
wrappedValidAtt := &slashertypes.WrappedIndexedAtt{IndexedAtt: validAtt.IndexedAttestation}
|
||||
indexedAttsChan <- wrappedValidAtt
|
||||
// Send an invalid, bad attestation which will not
|
||||
// pass integrity checks at it has invalid attestation data.
|
||||
indexedAttsChan <- ðpb.IndexedAttestation{
|
||||
AttestingIndices: secondIndices,
|
||||
indexedAttsChan <- &slashertypes.WrappedIndexedAtt{
|
||||
IndexedAtt: &ethpb.IndexedAttestation{
|
||||
AttestingIndices: secondIndices,
|
||||
},
|
||||
}
|
||||
cancel()
|
||||
s.wg.Wait()
|
||||
// Expect only a single, valid attestation was added to the queue.
|
||||
require.Equal(t, 1, s.attsQueue.size())
|
||||
wanted := []*slashertypes.IndexedAttestationWrapper{
|
||||
validAtt,
|
||||
{IndexedAttestation: validAtt.IndexedAttestation, DataRoot: validAtt.DataRoot},
|
||||
}
|
||||
require.DeepEqual(t, wanted, s.attsQueue.dequeue())
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
beaconChainSync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
|
||||
@@ -117,7 +118,7 @@ func (s *Service) run() {
|
||||
"Finished retrieving last epoch written per validator",
|
||||
)
|
||||
|
||||
indexedAttsChan := make(chan ethpb.IndexedAtt, 1)
|
||||
indexedAttsChan := make(chan *types.WrappedIndexedAtt, 1)
|
||||
beaconBlockHeadersChan := make(chan *ethpb.SignedBeaconBlockHeader, 1)
|
||||
|
||||
s.wg.Add(1)
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/prysmctl:__subpackages__",
|
||||
"//testing/slasher/simulator:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -26,6 +26,13 @@ func (c ChunkKind) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// WrappedIndexedAtt is a wrapper over the IndexedAtt interface.
|
||||
// The wrapper is needed to overcome the limitation of the event feed library
|
||||
// which doesn't work well with interface types.
|
||||
type WrappedIndexedAtt struct {
|
||||
ethpb.IndexedAtt
|
||||
}
|
||||
|
||||
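Because the feed delivers values on a concretely typed channel, producers wrap the interface before sending and consumers unwrap it on receive. A rough sketch of the consuming side only; the channel setup mirrors receiveAttestations earlier in this diff, while the feed variable and subscription handling are illustrative assumptions:

// Subscribe with the concrete wrapper type; methods of the embedded
// ethpb.IndexedAtt are promoted, so the payload is still usable as an interface.
ch := make(chan *WrappedIndexedAtt, 1)
sub := indexedAttestationsFeed.Subscribe(ch)
defer sub.Unsubscribe()
wrapped := <-ch
_ = wrapped.GetData() // promoted from the embedded IndexedAtt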
// IndexedAttestationWrapper contains an indexed attestation with its
|
||||
// data root to reduce duplicated computation.
|
||||
type IndexedAttestationWrapper struct {
|
||||
|
||||
@@ -333,6 +333,7 @@ func TestExpectedWithdrawals(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("electra all pending partial withdrawals", func(t *testing.T) {
|
||||
t.Skip("Failing until spectests are updated to v1.5.0-alpha.3")
|
||||
// Load a serialized Electra state from disk.
|
||||
// This spectest has a fully hydrated beacon state with partial pending withdrawals.
|
||||
serializedBytes, err := util.BazelFileBytes("tests/mainnet/electra/operations/execution_layer_withdrawal_request/pyspec_tests/pending_withdrawals_consume_all_excess_balance/pre.ssz_snappy")
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestExitEpochAndUpdateChurn_SpectestCase(t *testing.T) {
|
||||
t.Skip("Failing until spectests are updated to v1.5.0-alpha.3")
|
||||
// Load a serialized Electra state from disk.
|
||||
// The spec tests shows that the exit epoch is 262 for validator 0 performing a voluntary exit.
|
||||
serializedBytes, err := util.BazelFileBytes("tests/mainnet/electra/operations/voluntary_exit/pyspec_tests/exit_existing_churn_and_churn_limit_balance/pre.ssz_snappy")
|
||||
|
||||
@@ -117,6 +117,10 @@ func (b *BeaconState) SaveValidatorIndices() {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.validatorIndexCache == nil {
|
||||
b.validatorIndexCache = newFinalizedValidatorIndexCache()
|
||||
}
|
||||
|
||||
b.saveValidatorIndices()
|
||||
}
|
||||
|
||||
|
||||
@@ -844,6 +844,7 @@ func TestBeaconState_InitializeInactivityScoresCorrectly_Deneb(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBeaconChainCopy_Electra(t *testing.T) {
|
||||
t.Skip("Failing until spectests are updated to v1.5.0-alpha.3")
|
||||
// Load a serialized Electra state from disk.
|
||||
// This is a fully hydrated random test case from spectests.
|
||||
serializedBytes, err := util.BazelFileBytes("tests/mainnet/electra/ssz_static/BeaconState/ssz_random/case_0/serialized.ssz_snappy")
|
||||
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
"pending_consolidations_root.go",
|
||||
"pending_partial_withdrawals_root.go",
|
||||
"reference.go",
|
||||
"slice_root.go",
|
||||
"sync_committee.root.go",
|
||||
"trie_helpers.go",
|
||||
"unrealized_justification.go",
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
|
||||
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ go_library(
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
"validate_beacon_attestation_electra.go",
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
@@ -87,6 +88,7 @@ go_library(
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -160,7 +162,6 @@ go_test(
|
||||
"rpc_beacon_blocks_by_root_test.go",
|
||||
"rpc_blob_sidecars_by_range_test.go",
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_chunked_response_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_metadata_test.go",
|
||||
@@ -177,6 +178,7 @@ go_test(
|
||||
"sync_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_electra_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
"validate_beacon_blocks_test.go",
|
||||
"validate_blob_test.go",
|
||||
|
||||
@@ -1,13 +1,20 @@
package sync

import (
"fmt"
"reflect"
"strings"

pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
@@ -50,11 +57,12 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
}
// Handle different message types across forks.
if topic == p2p.BlockSubnetTopicFormat {
m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
dt, err := extractValidDataTypeFromTopic(topic, fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
if dt != nil {
m = dt
}
if err := s.cfg.p2p.Encoding().DecodeGossip(msg.Data, m); err != nil {
return nil, err
@@ -63,7 +71,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
}

// Replaces our fork digest with the formatter.
func (_ *Service) replaceForkDigest(topic string) (string, error) {
func (*Service) replaceForkDigest(topic string) (string, error) {
subStrings := strings.Split(topic, "/")
if len(subStrings) != 4 {
return "", errInvalidTopic
@@ -71,3 +79,48 @@ func (_ *Service) replaceForkDigest(topic string) (string, error) {
subStrings[2] = "%x"
return strings.Join(subStrings, "/"), nil
}

func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
switch topic {
case p2p.BlockSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
case p2p.AttestationSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
case p2p.AggregateAndProofSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
}
return nil, nil
}

func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte, tor blockchain.TemporalOracle) (T, error) {
var zero T

if len(digest) == 0 {
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
}
return f()
}
if len(digest) != forkDigestLength {
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, f := range typeMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return zero, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return f()
}
}
return zero, errors.Wrapf(
ErrNoValidDigest,
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
zero,
digest,
tor.GenesisTime(),
tor.GenesisValidatorsRoot(),
)
}

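
Editor's note: the generic extractDataTypeFromTypeMap above collapses the old per-type helpers (blocks, metadata, attestations) into one fork-digest-keyed lookup. As a rough illustration of that pattern only — not Prysm's real API — the following self-contained Go sketch uses a stand-in digest function and a string payload in place of the SSZ types:

package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

// forkDigest is a stand-in: the real digest comes from signing.ComputeForkDigest,
// which mixes the fork version with the genesis validators root.
func forkDigest(version [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
	h := sha256.Sum256(append(version[:], genesisValidatorsRoot[:]...))
	var d [4]byte
	copy(d[:], h[:4])
	return d
}

// pickByDigest mirrors extractDataTypeFromTypeMap: walk a version-keyed
// constructor map and build a fresh value for the entry whose digest matches.
func pickByDigest[T any](typeMap map[[4]byte]func() (T, error), digest [4]byte, vr [32]byte) (T, error) {
	var zero T
	for version, newT := range typeMap {
		if forkDigest(version, vr) == digest {
			return newT()
		}
	}
	return zero, errors.New("no type registered for this digest")
}

func main() {
	var vr [32]byte // genesis validators root (zeroed for the example)
	genesis := [4]byte{0, 0, 0, 0}
	altair := [4]byte{1, 0, 0, 0}
	blockMap := map[[4]byte]func() (string, error){
		genesis: func() (string, error) { return "phase0 block", nil },
		altair:  func() (string, error) { return "altair block", nil },
	}
	got, err := pickByDigest(blockMap, forkDigest(altair, vr), vr)
	fmt.Println(got, err) // altair block <nil>
}
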
@@ -11,15 +11,20 @@ import (
|
||||
"github.com/d4l3k/messagediff"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
@@ -109,3 +114,197 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
wantMd metadata.Metadata
|
||||
wantAtt ethpb.Att
|
||||
wantAggregate ethpb.SignedAggregateAttAndProof
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "capella fork version",
|
||||
args: args{
|
||||
digest: capellaDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "deneb fork version",
|
||||
args: args{
|
||||
digest: denebDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "electra fork version",
|
||||
args: args{
|
||||
digest: electraDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadElectra{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.AttestationElectra{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
|
||||
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
|
||||
}
|
||||
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
|
||||
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
|
||||
}
|
||||
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
|
||||
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
cancel: cancel,
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
r.rateLimiter = newRateLimiter(r.cfg.p2p)

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -87,12 +88,13 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}

func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb.SignedAggregateAttestationAndProof) {
func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
for _, signedAtt := range attestations {
att := signedAtt.Message
aggregate := signedAtt.AggregateAttestationAndProof().AggregateVal()
data := aggregate.GetData()
// The pending attestations can arrive in both aggregated and unaggregated forms,
// each from has distinct validation steps.
if helpers.IsAggregated(att.Aggregate) {
if helpers.IsAggregated(aggregate) {
// Save the pending aggregated attestation to the pool if it passes the aggregated
// validation steps.
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
@@ -101,11 +103,11 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
if err := s.cfg.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
}
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())

// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.Broadcast(ctx, signedAtt); err != nil {
@@ -116,39 +118,39 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
// This is an important validation before retrieving attestation pre state to defend against
// attestation's target intentionally reference checkpoint that's long ago.
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
continue
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not verify FFG consistency")
continue
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Aggregate.Data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
log.WithError(err).Debug("Could not retrieve attestation prestate")
continue
}

valid, err := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
continue
}
if valid == pubsub.ValidationAccept {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
}
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())

valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(att.Aggregate.Data.Slot))
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
if err != nil {
log.WithError(err).Debug("Could not retrieve active validator count")
continue
}
// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, aggregate), aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
}
@@ -160,8 +162,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
// root of the missing block. The value is the list of pending attestations
// that voted for that block root. The caller of this function is responsible
// for not sending repeated attestations to the pending queue.
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)

s.pendingAttsLock.Lock()
defer s.pendingAttsLock.Unlock()
@@ -178,7 +180,7 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
_, ok := s.blkRootToPendingAtts[root]
if !ok {
pendingAttCount.Inc()
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
return
}
// Skip if the attestation from the same aggregator already exists in
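
Editor's note: the refactor above swaps direct protobuf field access (att.Aggregate.Data, att.AggregatorIndex) for getter calls on the version-agnostic ethpb.SignedAggregateAttAndProof interface, so the same queue code can hold Phase 0 and Electra aggregates. A toy sketch of that style, using invented types rather than Prysm's generated ones:

package main

import "fmt"

// Hypothetical stand-ins; Prysm's real code goes through the generated
// ethpb.SignedAggregateAttAndProof / ethpb.Att interfaces instead.
type attData struct {
	Slot            uint64
	BeaconBlockRoot [32]byte
}

type aggregateAtt interface {
	GetData() *attData
}

type phase0Att struct{ data *attData }

func (a *phase0Att) GetData() *attData { return a.data }

type electraAtt struct {
	data          *attData
	committeeBits []byte // Electra-only detail, invisible behind the getter.
}

func (a *electraAtt) GetData() *attData { return a.data }

// blockRootOf only touches the getter, so the same queue logic serves either
// version, which is what the []ethpb.SignedAggregateAttAndProof element type
// buys processAttestations and savePendingAtt.
func blockRootOf(a aggregateAtt) [32]byte {
	return a.GetData().BeaconBlockRoot
}

func main() {
	p := &phase0Att{data: &attData{Slot: 1, BeaconBlockRoot: [32]byte{'A'}}}
	e := &electraAtt{data: &attData{Slot: 2, BeaconBlockRoot: [32]byte{'B'}}}
	fmt.Printf("%x %x\n", blockRootOf(p), blockRootOf(e))
}
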
@@ -192,20 +194,32 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
}

func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
if a.Signature != nil {
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
if a.GetSignature() != nil {
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
}
if b.Signature != nil {
if b.GetSignature() != nil {
return false
}
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {

aAggregate := a.AggregateAttestationAndProof().AggregateVal()
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
aData := aAggregate.GetData()
bData := bAggregate.GetData()

if aData.Slot != bData.Slot {
return false
}
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {

if a.Version() >= version.Electra {
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
return false
}
} else if aData.CommitteeIndex != bData.CommitteeIndex {
return false
}
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)

return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
}
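
Editor's note: attsAreEqual now has to branch on the attestation version: pre-Electra aggregates carry a single Data.CommitteeIndex, while Electra aggregates identify their committees through a bit vector, so equality becomes a byte comparison of those bits. A minimal stand-alone sketch of that branch, using plain byte slices instead of Prysm's committee bit type and a made-up version constant:

package main

import (
	"bytes"
	"fmt"
)

// electra is a hypothetical version constant for this sketch, not the value
// from Prysm's runtime/version package.
const electra = 5

type agg struct {
	version        int
	committeeIndex uint64
	committeeBits  []byte
}

// sameCommittee mirrors the branch added to attsAreEqual.
func sameCommittee(a, b agg) bool {
	if a.version >= electra {
		// Electra aggregates can span committees, so compare the bit vector.
		return bytes.Equal(a.committeeBits, b.committeeBits)
	}
	// Pre-Electra attestations name a single committee index.
	return a.committeeIndex == b.committeeIndex
}

func main() {
	x := agg{version: electra, committeeBits: []byte{0b01}}
	y := agg{version: electra, committeeBits: []byte{0b10}}
	fmt.Println(sameCommittee(x, y)) // false
}
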

// This validates the pending attestations in the queue are still valid.
@@ -221,7 +235,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)

for bRoot, atts := range s.blkRootToPendingAtts {
for i := len(atts) - 1; i >= 0; i-- {
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
// Remove the pending attestation from the list in place.
atts = append(atts[:i], atts[i+1:]...)
}

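
Editor's note: validatePendingAtts walks each pending list from the back so that the in-place append(atts[:i], atts[i+1:]...) removal never skips the element that slides into the freed slot. The same idea in isolation, with slot numbers standing in for pending attestations:

package main

import "fmt"

// pruneOld drops entries below cutoff while iterating from the back, the same
// in-place append pattern validatePendingAtts uses to expire old attestations.
func pruneOld(slots []uint64, cutoff uint64) []uint64 {
	for i := len(slots) - 1; i >= 0; i-- {
		if slots[i] < cutoff {
			// Removing index i only shifts elements we have already visited.
			slots = append(slots[:i], slots[i+1:]...)
		}
	}
	return slots
}

func main() {
	fmt.Println(pruneOld([]uint64{3, 10, 4, 12}, 5)) // [10 12]
}
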
@@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
r := &Service{
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
|
||||
a := ðpb.AggregateAttestationAndProof{Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -134,7 +134,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
atts, err := r.cfg.attPool.UnaggregatedAttestations()
|
||||
@@ -162,7 +162,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -182,7 +182,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
|
||||
@@ -245,13 +245,13 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
go r.verifierRoutine()
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
|
||||
@@ -330,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -339,7 +339,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
@@ -353,7 +353,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
// 100 Attestations per block root.
|
||||
@@ -401,7 +401,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
r1 := [32]byte{'A'}
|
||||
@@ -428,7 +428,7 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
|
||||
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
for i := 0; i < pendingAttsLimit; i++ {
|
||||
@@ -457,5 +457,71 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
|
||||
|
||||
}
|
||||
|
||||
func Test_attsAreEqual_Committee(t *testing.T) {
|
||||
t.Run("Phase 0 equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Phase 0 not equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 456}}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(0, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra not equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(1, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,14 +4,12 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -107,7 +105,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOrac
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, tor)
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -131,7 +129,7 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, tor)
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -139,30 +137,6 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
return blk, err
}

func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
if len(digest) == 0 {
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no block type exists for the genesis fork version.")
}
return bFunc()
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, blkFunc := range types.BlockMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return blkFunc()
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}

// WriteBlobSidecarChunk writes blob chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {

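
Editor's note: readFirstChunkedBlock and readResponseChunk now feed the chunk's context bytes (the fork digest read from the stream) into the shared lookup instead of the deleted block-specific helper. As a loose illustration only — the real req/resp framing also carries result codes and SSZ-snappy encoding — here is the bare "read 4 context bytes, pick a decoder" step:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readContextBytes pulls the 4-byte context (fork digest) that prefixes a
// response chunk in this sketch.
func readContextBytes(r io.Reader) ([4]byte, error) {
	var ctx [4]byte
	_, err := io.ReadFull(r, ctx[:])
	return ctx, err
}

func main() {
	// Hypothetical digest-to-decoder table; the real code passes rpcCtx into
	// extractDataTypeFromTypeMap(types.BlockMap, ...).
	decoders := map[[4]byte]string{
		{0x01, 0x02, 0x03, 0x04}: "deneb block decoder",
	}
	stream := bytes.NewReader([]byte{0x01, 0x02, 0x03, 0x04, 0xaa, 0xbb})
	ctx, err := readContextBytes(stream)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoders[ctx]) // deneb block decoder
}
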
@@ -1,121 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestExtractBlockDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want interfaces.ReadOnlySignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,13 +7,9 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
@@ -112,7 +108,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
if err != nil {
return nil, err
}
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
}
@@ -133,27 +129,3 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
}
return msg, nil
}

func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
if len(digest) == 0 {
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no metadata type exists for the genesis fork version.")
}
return mdFunc(), nil
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, mdFunc := range types.MetaDataMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return mdFunc(), nil
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}

@@ -2,16 +2,13 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
db "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
@@ -21,7 +18,6 @@ import (
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -233,80 +229,3 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
|
||||
t.Error("Peer is disconnected despite receiving a valid ping")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractMetaDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
clock blockchain.TemporalOracle
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want metadata.Metadata
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}