Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: 29 Commits
handleCras... → bazel-rele...
Commits (SHA1): dfaf53da00, 2317375983, 6354748b12, e910471784, ab7e97ba63, e99de7726d, 606fdd2299, 1eb6025aaa, d431ceee25, 4597599196, 0c32eb5c03, 4b1cb6fa80, 9cfb823cc6, cb502ceb8c, 8da4d572d9, 1c6fa65f7b, eaa2566e90, 6957f0637f, 01b1f15bdf, b787fd877a, 2c89ce810d, e687fff922, 5e2498be7e, 76f958710f, 1775cf89c6, 8fecfaee48, f089405d2f, 029c81a2e4, 56c48b4971
.github/workflows/go.yml (vendored, 2 changes)
@@ -33,7 +33,7 @@ jobs:
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@latest
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

lint:
@@ -2,8 +2,8 @@
[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
WORKSPACE (10 changes)
@@ -215,7 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.2.0-rc.2"
consensus_spec_version = "v1.2.0-rc.3"

bls_test_version = "v0.1.1"

@@ -231,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "eff52923eebbed6e37a5282db5290abe67c68d6aa54302e3db2b0718c3edf867",
sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -247,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "6183d39d40ae659347e8bcfa435cbbe6de8c19ab327b61d47f906bb087bc7a67",
sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -263,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "894d16608d7d37a8f6206165e6c2b6ffcc45b13152b5f411e9283b005ca9793f",
sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "006674e5d7eee613f1155e154ab97f6b57589ec92e6e3e5f7affd2b53581e907",
sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -139,6 +139,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -516,6 +519,29 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
return nil
}

// This feeds in the attestations included in the block to fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
}
return nil
}

// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's the caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
@@ -3006,10 +3006,9 @@ func TestStore_NoViableHead_Reboot_DoublyLinkedTree(t *testing.T) {
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
// The node is optimistic now.
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.Equal(t, false, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

// Check that the node's justified checkpoint does not agree with the
@@ -3230,10 +3229,9 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
// The node is optimistic now
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.Equal(t, false, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

// Check that the node's justified checkpoint does not agree with the
@@ -3314,6 +3312,75 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
}

func TestOnBlock_HandleBlockAttestations(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)

require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))

require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))

require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}

// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {
@@ -150,11 +150,6 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
return err
}

// Add block attestations to the fork choice pool to compute head.
if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
log.WithError(err).Error("Could not save block attestations for fork choice")
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body().VoluntaryExits() {
s.cfg.ExitPool.MarkIncluded(e)
@@ -76,9 +76,9 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 0 {
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
"Got %d but wanted %d", baCount, 2)
"Got %d but wanted %d", baCount, 0)
}
},
},
@@ -231,14 +231,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}

lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
if !features.Get().EnableStartOptimistic {
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
}
// not attempting to save initial sync blocks here, because there shouldn't be any until
@@ -26,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -528,3 +529,45 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}

func TestChainService_EverythingOptimistic(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
require.NoError(t, c.StartFromSavedState(headState))
require.Equal(t, true, c.cfg.ForkChoiceStore.HasNode(headRoot))
op, err := c.cfg.ForkChoiceStore.IsOptimistic(headRoot)
require.NoError(t, err)
require.Equal(t, true, op)
}
@@ -185,7 +185,21 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()

return dc.deposits
// Make a shallow copy of the deposits and return that. This way, the
// caller can safely iterate over the returned list of deposits without
// the possibility of new deposits showing up. If we were to return the
// list without a copy, when a new deposit is added to the cache, it
// would also be present in the returned value. This could result in a
// race condition if the list is being iterated over.
//
// It's not necessary to make a deep copy of this list because the
// deposits in the cache should never be modified. It is still possible
// for the caller to modify one of the underlying deposits and modify
// the cache, but that's not a race condition. Also, a deep copy would
// take too long and use too much memory.
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
copy(deposits, dc.deposits)
return deposits
}

// AllDeposits returns a list of historical deposits until the given block number
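The new comment spells out the reasoning: the cache can keep growing while a caller iterates, so the getter hands back a fixed-length snapshot instead of the internal slice. A rough standalone sketch of the same pattern follows; the types are toy stand-ins, not Prysm's DepositCache.

```go
// Minimal sketch (toy types) of returning a shallow copy from a cache getter
// so callers can iterate while other goroutines keep appending.
package main

import (
	"fmt"
	"sync"
)

type item struct{ Index int64 }

type cache struct {
	mu    sync.RWMutex
	items []*item
}

// All returns a shallow copy of the internal slice. The *item pointers are
// shared, but the slice header handed to the caller can no longer be grown
// or reallocated underneath an in-progress iteration.
func (c *cache) All() []*item {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]*item, len(c.items))
	copy(out, c.items)
	return out
}

func (c *cache) Insert(i *item) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = append(c.items, i)
}

func main() {
	c := &cache{}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // writer keeps inserting while the reader works on a snapshot
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			c.Insert(&item{Index: int64(i)})
		}
	}()
	snapshot := c.All() // fixed-length view; later inserts do not show up here
	for range snapshot {
	}
	wg.Wait()
	fmt.Println("snapshot length:", len(snapshot), "cache length:", len(c.All()))
}
```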
@@ -65,7 +65,6 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
currClient := s.rpcClient
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
errorLogger(err, "Could not connect to execution client endpoint")
s.retryExecutionClientConnection(ctx, err)
continue
}
// Close previous client, if connection was successful.
@@ -183,11 +183,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// Releasing here the checkpoints lock because
// AncestorRoot acquires a lock on nodes and that can
// cause a double lock.
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
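The added comment gives the rationale: AncestorRoot manages its own locking, so holding the checkpoints lock across the call risks a double lock. A simplified sketch of the unlock-around-the-call pattern is below; the store and field names are hypothetical, not the real forkchoice package.

```go
// Rough sketch of dropping one lock before calling into code that takes its
// own locks, then re-acquiring it only for the final write.
package main

import (
	"fmt"
	"sync"
)

type store struct {
	checkpointsMu sync.Mutex
	nodesMu       sync.RWMutex

	justifiedEpoch uint64
	ancestors      map[string]string
}

// ancestorRoot takes the nodes lock. Calling it while also holding other
// locks that its callees may need can deadlock; sync.Mutex is not reentrant,
// so even re-locking the same mutex blocks forever.
func (s *store) ancestorRoot(root string) string {
	s.nodesMu.RLock()
	defer s.nodesMu.RUnlock()
	return s.ancestors[root]
}

func (s *store) updateJustified(root string, epoch uint64) {
	s.checkpointsMu.Lock()
	current := s.justifiedEpoch
	// Release before calling into code that manages its own locking.
	s.checkpointsMu.Unlock()

	ancestor := s.ancestorRoot(root)

	// Re-acquire only for the write.
	s.checkpointsMu.Lock()
	defer s.checkpointsMu.Unlock()
	if ancestor != "" && epoch > current {
		s.justifiedEpoch = epoch
	}
}

func main() {
	s := &store{ancestors: map[string]string{"b": "a"}}
	s.updateJustified("b", 7)
	fmt.Println("justified epoch:", s.justifiedEpoch)
}
```

Because the lock is dropped and re-taken, the state may have changed in between, which is why the surrounding code re-checks its precondition (here, `if root == currentRoot`) after re-acquiring the lock.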
@@ -296,7 +300,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock.
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
for index, vote := range f.votes {
// Skip if validator has been slashed
@@ -424,6 +429,9 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()

f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
@@ -433,8 +441,6 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
f.store.slashedIndices[index] = true

// Subtract last vote from this equivocating validator
f.votesLock.RLock()
defer f.votesLock.RUnlock()

if index >= types.ValidatorIndex(len(f.balances)) {
return
@@ -40,7 +40,8 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
return nil
}

// updateBestDescendant updates the best descendant of this node and its children.
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
@@ -115,10 +116,13 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
return ctx.Err()
}

if !n.optimistic || n.parent == nil {
if !n.optimistic {
return nil
}

n.optimistic = false

if n.parent == nil {
return nil
}
return n.parent.setNodeAndParentValidated(ctx)
}
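Restructured this way, the walk stops at the first node that is already validated and otherwise clears the optimistic flag before recursing to the parent, so the root node also gets its flag cleared rather than being skipped. A toy sketch of that recursion, with a simplified Node type and no context or locking:

```go
// Simplified sketch of walking up the parent chain, clearing an "optimistic"
// flag until an already-validated node or the tree root is reached.
package main

import "fmt"

type node struct {
	optimistic bool
	parent     *node
}

func (n *node) setNodeAndParentValidated() {
	if !n.optimistic {
		return // this node was already validated; stop the walk here
	}
	n.optimistic = false
	if n.parent == nil {
		return // reached the tree root
	}
	n.parent.setNodeAndParentValidated()
}

func main() {
	root := &node{optimistic: true}
	a := &node{optimistic: true, parent: root}
	b := &node{optimistic: true, parent: a}
	b.setNodeAndParentValidated()
	fmt.Println(root.optimistic, a.optimistic, b.optimistic) // false false false
}
```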
@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
}

// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.Lock()

f.store.checkpointsLock.RLock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// loop call here.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
f.store.checkpointsLock.Unlock()
}
}
f.store.checkpointsLock.Unlock()
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}
@@ -389,3 +389,14 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}

func TestSetOptimisticToValid(t *testing.T) {
f := setup(1, 1)
op, err := f.IsOptimistic([32]byte{})
require.NoError(t, err)
require.Equal(t, true, op)
require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{}))
op, err = f.IsOptimistic([32]byte{})
require.NoError(t, err)
require.Equal(t, false, op)
}
@@ -170,8 +170,11 @@ func (s *Store) insert(ctx context.Context,
}

// Update best descendants
if err := s.treeRootNode.updateBestDescendant(ctx,
s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch); err != nil {
return n, err
}
}
@@ -10,7 +10,9 @@ import (
)

// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
// It returns a list of deltas that represents the difference between old
// balances and new balances. This function assumes the caller holds a lock in
// Store.nodesLock and Store.votesLock
func computeDeltas(
ctx context.Context,
count int,
@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
}

// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.Lock()

f.store.checkpointsLock.RLock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// loop call here.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
f.store.checkpointsLock.Unlock()
}
}
f.store.checkpointsLock.Unlock()
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}
@@ -188,11 +188,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// release the checkpoints lock here because
// AncestorRoot takes a lock on nodes and that can lead
// to double locks
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -285,6 +289,8 @@ func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32
if r1 == r2 {
return r1, nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()

i1, ok := f.store.nodesIndices[r1]
if !ok || i1 >= uint64(len(f.store.nodes)) {
@@ -406,8 +412,12 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {

if !s.viableForHead(bestNode) {
s.allTipsAreInvalid = true
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, fEpoch, bestNode.justifiedEpoch, jEpoch)
}
s.allTipsAreInvalid = false
@@ -426,7 +436,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestNode.root, nil
}

// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
// updateCanonicalNodes updates the canonical nodes mapping given the input
// block root. This function assumes the caller holds a lock in Store.nodesLock
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
defer span.End()
@@ -548,14 +559,14 @@ func (s *Store) insert(ctx context.Context,
if slot > s.highestReceivedSlot {
s.highestReceivedSlot = slot
}

return n, nil
}

// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
// the best child is then updated along with the best descendant. This function
// assumes the caller holds a lock in Store.nodesLock
func (s *Store) applyWeightChanges(
ctx context.Context, newBalances []uint64, delta []int,
) error {
@@ -900,6 +911,8 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
@@ -909,9 +922,6 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
f.store.slashedIndices[index] = true

// Subtract last vote from this equivocating validator
f.votesLock.RLock()
defer f.votesLock.RUnlock()

if index >= types.ValidatorIndex(len(f.balances)) {
return
}
@@ -26,7 +26,6 @@ go_library(
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
@@ -28,7 +28,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/v3/beacon-chain/deterministic-genesis"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/gateway"

@@ -100,7 +99,6 @@ type BeaconNode struct {
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed

@@ -229,9 +227,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}

log.Debugln("Starting Fork Choice")
beacon.startForkChoice()

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(); err != nil {
return nil, err

@@ -355,14 +350,6 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) startForkChoice() {
if !features.Get().DisableForkchoiceDoublyLinkedTree {
b.forkChoiceStore = doublylinkedtree.New()
} else {
b.forkChoiceStore = protoarray.New()
}
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)

@@ -609,13 +596,19 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithForkChoiceStore(b.forkChoiceStore),
blockchain.WithAttestationService(attService),
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)

if features.Get().DisableForkchoiceDoublyLinkedTree {
opts = append(opts, blockchain.WithForkChoiceStore(protoarray.New()))
} else {
opts = append(opts, blockchain.WithForkChoiceStore(doublylinkedtree.New()))
}

blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
return errors.Wrap(err, "could not register blockchain service")

@@ -843,7 +836,7 @@ func (b *BeaconNode) registerRPCService() error {
return b.services.RegisterService(rpcService)
}

func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
var additionalHandlers []prometheus.Handler
var p *p2p.Service
if err := b.services.FetchService(&p); err != nil {
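With startForkChoice gone, the fork choice backend is chosen where the blockchain service is assembled, appended as one more functional option based on the DisableForkchoiceDoublyLinkedTree flag instead of being stored on the node object. A minimal sketch of that pattern follows; the type and function names are hypothetical, not the real blockchain package.

```go
// Rough sketch of selecting an implementation behind an interface via a
// functional option at service construction time.
package main

import "fmt"

type ForkChoicer interface{ Name() string }

type protoArray struct{}

func (protoArray) Name() string { return "protoarray" }

type doublyLinkedTree struct{}

func (doublyLinkedTree) Name() string { return "doubly-linked-tree" }

type service struct{ fc ForkChoicer }

type option func(*service)

func withForkChoiceStore(f ForkChoicer) option {
	return func(s *service) { s.fc = f }
}

func newService(opts ...option) *service {
	s := &service{}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	disableDoublyLinkedTree := false // stands in for the feature flag
	var opts []option
	if disableDoublyLinkedTree {
		opts = append(opts, withForkChoiceStore(protoArray{}))
	} else {
		opts = append(opts, withForkChoiceStore(doublyLinkedTree{}))
	}
	svc := newService(opts...)
	fmt.Println("fork choice backend:", svc.fc.Name())
}
```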
@@ -6,12 +6,6 @@ datadir: /var/lib/prysm/beacon
# http-web3provider: ETH1 API endpoint, eg. http://localhost:8545 for a local geth service on the default port
http-web3provider: http://localhost:8545

# fallback-web3provider: List of backup ETH1 API endpoints, used if above is not working
# For example:
# fallback-web3provider:
#   - https://mainnet.infura.io/v3/YOUR-PROJECT-ID
#   - https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID

# Optional tuning parameters
# For full list, see https://docs.prylabs.network/docs/prysm-usage/parameters
@@ -781,8 +781,8 @@ type validatorRegistrationJson struct {
}

type signedValidatorRegistrationJson struct {
Message validatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
Message *validatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}

type signedValidatorRegistrationsRequestJson struct {
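The diff itself does not state why Message becomes a pointer. One common reason for such a change, sketched below with standalone types rather than the gateway's, is that a pointer field lets the decoder distinguish an absent "message" object from a present-but-zero one:

```go
// Small illustration of the practical difference a pointer field makes when
// decoding JSON: with a pointer you can tell "message" was absent.
package main

import (
	"encoding/json"
	"fmt"
)

type message struct {
	Pubkey string `json:"pubkey"`
}

type withValue struct {
	Message   message `json:"message"`
	Signature string  `json:"signature"`
}

type withPointer struct {
	Message   *message `json:"message"`
	Signature string   `json:"signature"`
}

func main() {
	raw := []byte(`{"signature":"0xabc"}`) // no "message" field at all

	var v withValue
	var p withPointer
	_ = json.Unmarshal(raw, &v)
	_ = json.Unmarshal(raw, &p)

	fmt.Printf("value field:   %+v\n", v.Message) // zero struct, indistinguishable from {"pubkey":""}
	fmt.Println("pointer field:", p.Message)      // <nil>, clearly absent
}
```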
@@ -23,6 +23,7 @@ go_library(
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",

@@ -82,6 +83,7 @@ go_test(
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
@@ -722,66 +722,24 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
return nil, err
}

_, err = blk.PbPhase0Block()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
v1Alpha1Attestations := blk.Block().Body().Attestations()
v1Attestations := make([]*ethpbv1.Attestation, 0, len(v1Alpha1Attestations))
for _, att := range v1Alpha1Attestations {
migratedAtt := migration.V1Alpha1AttestationToV1(att)
v1Attestations = append(v1Attestations, migratedAtt)
}
if err == nil {
v1Blk, err := migration.SignedBeaconBlock(blk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v1Blk.Block.Body.Attestations,
ExecutionOptimistic: false,
}, nil
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}

altairBlk, err := blk.PbAltairBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
if err == nil {
if altairBlk == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: false,
}, nil
}

bellatrixBlock, err := blk.PbBellatrixBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
if err == nil {
if bellatrixBlock == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
}

return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
return &ethpbv1.BlockAttestationsResponse{
Data: v1Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
}

func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfaces.SignedBeaconBlock, error) {
@@ -1857,8 +1857,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {

v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})

@@ -1961,7 +1964,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})

@@ -2064,7 +2071,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(tt.want.Block)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
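These tests now normalize an empty attestation slice to nil before comparing, which matters because reflect.DeepEqual does not consider a nil slice equal to an empty, non-nil one:

```go
// Why the tests turn an empty slice into nil before comparing:
// reflect.DeepEqual treats nil and empty-but-non-nil slices as unequal.
package main

import (
	"fmt"
	"reflect"
)

func main() {
	var nilSlice []int
	emptySlice := []int{}

	fmt.Println(reflect.DeepEqual(nilSlice, emptySlice)) // false
	fmt.Println(len(nilSlice) == len(emptySlice))        // true, both have length 0

	// Normalizing as the updated tests do makes the comparison insensitive
	// to that distinction.
	if len(emptySlice) == 0 {
		emptySlice = nil
	}
	fmt.Println(reflect.DeepEqual(nilSlice, emptySlice)) // true
}
```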
@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"

@@ -164,6 +165,10 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.Attes
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.Attestation_1.Data.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

alphaSlashing := migration.V1AttSlashingToV1Alpha1(req)
err = blocks.VerifyAttesterSlashing(ctx, headState, alphaSlashing)

@@ -216,6 +221,10 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.SignedHeader_1.Message.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

alphaSlashing := migration.V1ProposerSlashingToV1Alpha1(req)
err = blocks.VerifyProposerSlashing(headState, alphaSlashing)

@@ -269,6 +278,14 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
s, err := slots.EpochStart(req.Message.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get epoch from message: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, s)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

validator, err := headState.ValidatorAtIndexReadOnly(req.Message.ValidatorIndex)
if err != nil {
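Each handler now advances the head state to the slot or epoch referenced by the submitted message before verifying it. A plausible reading, not stated in the diff itself, is that the signing domain depends on the fork version in effect at the message's epoch, so verifying a post-fork message against a pre-fork state would derive the wrong domain. The toy sketch below uses made-up fork parameters and a simplified domain function rather than Prysm's signing code:

```go
// Hedged sketch of why the verification state matters across a fork boundary:
// the domain mixed into the signed data changes with the fork version.
package main

import "fmt"

type epoch uint64

type forkSchedule struct {
	altairForkEpoch epoch
	genesisVersion  [4]byte
	altairVersion   [4]byte
}

func (fs forkSchedule) versionAt(e epoch) [4]byte {
	if e >= fs.altairForkEpoch {
		return fs.altairVersion
	}
	return fs.genesisVersion
}

// domain stands in for the spec's compute_domain(domain_type, fork_version, ...).
func domain(domainType, forkVersion [4]byte) [8]byte {
	var d [8]byte
	copy(d[:4], domainType[:])
	copy(d[4:], forkVersion[:])
	return d
}

func main() {
	fs := forkSchedule{
		altairForkEpoch: 1,
		genesisVersion:  [4]byte{0, 0, 0, 0},
		altairVersion:   [4]byte{1, 0, 0, 0},
	}
	beaconAttester := [4]byte{1, 0, 0, 0} // illustrative domain type

	stateEpoch := epoch(0)   // head state not yet advanced past the fork
	messageEpoch := epoch(1) // slashing or exit refers to a post-fork epoch

	fmt.Println("domain from stale state:  ", domain(beaconAttester, fs.versionAt(stateEpoch)))
	fmt.Println("domain at message's epoch:", domain(beaconAttester, fs.versionAt(messageEpoch)))
	// The two differ, which is why the handler processes slots up to the
	// message's slot before calling the verification helper.
}
```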
@@ -11,6 +11,7 @@ import (
grpcutil "github.com/prysmaticlabs/prysm/v3/api/grpc"
blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
slashingsmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings/mock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits/mock"

@@ -444,6 +445,80 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)

slashing := &ethpbv1.AttesterSlashing{
Attestation_1: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)

for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -551,6 +626,68 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)

slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
SignedHeader_2: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)

for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
sb, err := signing.ComputeDomainAndSign(
newBs,
slots.ToEpoch(h.Message.Slot),
h.Message,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
}

func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -630,6 +767,47 @@ func TestSubmitVoluntaryExit_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)
// Satisfy activity time required before exiting.
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))))

exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: params.BeaconConfig().ShardCommitteePeriod + 1,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
require.NoError(t, err)

sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
exit.Signature = sig.Marshal()

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
}

func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
ctx := context.Background()
@@ -14,6 +14,7 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",

@@ -86,6 +87,7 @@ go_test(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
rpchelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
statev1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"

@@ -546,7 +547,24 @@ func (vs *Server) PrepareBeaconProposer(
defer span.End()
var feeRecipients []common.Address
var validatorIndices []types.ValidatorIndex
for _, recipientContainer := range request.Recipients {
newRecipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
for _, r := range request.Recipients {
f, err := vs.V1Alpha1Server.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
newRecipients = append(newRecipients, r)
case err != nil:
return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
default:
}
if common.BytesToAddress(r.FeeRecipient) != f {
newRecipients = append(newRecipients, r)
}
}
if len(newRecipients) == 0 {
return &emptypb.Empty{}, nil
}
for _, recipientContainer := range newRecipients {
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
@@ -44,6 +44,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
@@ -3621,6 +3622,89 @@ func TestPrepareBeaconProposer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
v1Server := &v1alpha1validator.Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
proposerServer := &Server{V1Alpha1Server: v1Server}
|
||||
|
||||
// New validator
|
||||
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req := ðpbv1.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
},
|
||||
}
|
||||
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validator
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validator with different fee recipient
|
||||
hook.Reset()
|
||||
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req = ðpbv1.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
},
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// More than one validator
|
||||
hook.Reset()
|
||||
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req = ðpbv1.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
{FeeRecipient: f, ValidatorIndex: 2},
|
||||
},
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validators
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
}
|
||||
|
||||
func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
|
||||
db := dbutil.SetupDB(b)
|
||||
ctx := context.Background()
|
||||
v1Server := &v1alpha1validator.Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
proposerServer := &Server{V1Alpha1Server: v1Server}
|
||||
|
||||
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
recipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0)
|
||||
for i := 0; i < 10000; i++ {
|
||||
recipients = append(recipients, &ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{FeeRecipient: f, ValidatorIndex: types.ValidatorIndex(i)})
|
||||
}
|
||||
|
||||
req := &ethpbv1.PrepareBeaconProposerRequest{
|
||||
Recipients: recipients,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_SubmitValidatorRegistrations(t *testing.T) {
|
||||
type args struct {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
@@ -81,7 +82,26 @@ func (vs *Server) PrepareBeaconProposer(
|
||||
defer span.End()
|
||||
var feeRecipients []common.Address
|
||||
var validatorIndices []types.ValidatorIndex
|
||||
for _, recipientContainer := range request.Recipients {
|
||||
|
||||
newRecipients := make([]*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
|
||||
for _, r := range request.Recipients {
|
||||
f, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
|
||||
switch {
|
||||
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
|
||||
newRecipients = append(newRecipients, r)
|
||||
case err != nil:
|
||||
return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
|
||||
default:
|
||||
}
|
||||
if common.BytesToAddress(r.FeeRecipient) != f {
|
||||
newRecipients = append(newRecipients, r)
|
||||
}
|
||||
}
|
||||
if len(newRecipients) == 0 {
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
for _, recipientContainer := range newRecipients {
|
||||
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
|
||||
if !common.IsHexAddress(recipient) {
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
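The new filtering step above only forwards fee-recipient containers that are missing from the DB or whose address has changed, and returns early when nothing needs updating. A minimal sketch of that check, using simplified placeholder types rather than the generated protobufs (filterChanged and its types are illustrative only, not part of the diff):

package main

import "fmt"

type container struct {
	index     uint64
	recipient [20]byte
}

// filterChanged keeps only containers whose recipient is absent from the
// stored map or differs from the stored address, mirroring the switch above.
func filterChanged(stored map[uint64][20]byte, reqs []container) []container {
	out := make([]container, 0, len(reqs))
	for _, r := range reqs {
		cur, ok := stored[r.index]
		if !ok || r.recipient != cur {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	stored := map[uint64][20]byte{1: {0xFF}}
	reqs := []container{
		{index: 1, recipient: [20]byte{0xFF}}, // unchanged, dropped
		{index: 2, recipient: [20]byte{0x01}}, // new, kept
	}
	fmt.Println(len(filterChanged(stored, reqs))) // 1
}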
|
||||
|
||||
@@ -2357,6 +2357,84 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
proposerServer := &Server{BeaconDB: db}
|
||||
|
||||
// New validator
|
||||
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req := &ethpb.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
},
|
||||
}
|
||||
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validator
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validator with different fee recipient
|
||||
hook.Reset()
|
||||
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req = &ethpb.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
},
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// More than one validator
|
||||
hook.Reset()
|
||||
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
req = &ethpb.PrepareBeaconProposerRequest{
|
||||
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
|
||||
{FeeRecipient: f, ValidatorIndex: 1},
|
||||
{FeeRecipient: f, ValidatorIndex: 2},
|
||||
},
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
|
||||
// Same validators
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
}
|
||||
|
||||
func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
|
||||
db := dbutil.SetupDB(b)
|
||||
ctx := context.Background()
|
||||
proposerServer := &Server{BeaconDB: db}
|
||||
|
||||
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
|
||||
recipients := make([]*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer, 0)
|
||||
for i := 0; i < 10000; i++ {
|
||||
recipients = append(recipients, &ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{FeeRecipient: f, ValidatorIndex: types.ValidatorIndex(i)})
|
||||
}
|
||||
|
||||
req := &ethpb.PrepareBeaconProposerRequest{
|
||||
Recipients: recipients,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposer_SubmitValidatorRegistrations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
proposerServer := &Server{}
|
||||
|
||||
@@ -190,7 +190,7 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
continue
|
||||
}
|
||||
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
|
||||
log.WithError(chunkErr).Error("Could not send a chunked response")
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
|
||||
@@ -178,6 +178,7 @@ func (s *Service) Start() {
|
||||
s.processPendingBlocksQueue()
|
||||
s.processPendingAttsQueue()
|
||||
s.maintainPeerStatuses()
|
||||
s.resyncIfBehind()
|
||||
|
||||
// Update sync metrics.
|
||||
async.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/io/file"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@@ -60,11 +61,13 @@ func parseJWTSecretFromFile(c *cli.Context) ([]byte, error) {
|
||||
if len(secret) < 32 {
|
||||
return nil, errors.New("provided JWT secret should be a hex string of at least 32 bytes")
|
||||
}
|
||||
log.Infof("Finished reading JWT secret from %s", jwtSecretFile)
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
func parseExecutionChainEndpoint(c *cli.Context) (string, error) {
|
||||
if c.String(flags.ExecutionEngineEndpoint.Name) == "" {
|
||||
aliasUsed := c.IsSet(flags.HTTPWeb3ProviderFlag.Name)
|
||||
if c.String(flags.ExecutionEngineEndpoint.Name) == "" && !aliasUsed {
|
||||
return "", fmt.Errorf(
|
||||
"you need to specify %s to provide a connection endpoint to an Ethereum execution client "+
|
||||
"for your Prysm beacon node. This is a requirement for running a node. You can read more about "+
|
||||
@@ -73,5 +76,12 @@ func parseExecutionChainEndpoint(c *cli.Context) (string, error) {
|
||||
flags.ExecutionEngineEndpoint.Name,
|
||||
)
|
||||
}
|
||||
// If users only declare the deprecated flag without setting the execution engine
|
||||
// flag, we fallback to using the deprecated flag value.
|
||||
if aliasUsed && !c.IsSet(flags.ExecutionEngineEndpoint.Name) {
|
||||
log.Warnf("The %s flag has been deprecated and will be removed in a future release,"+
|
||||
"please use the execution endpoint flag instead %s", flags.HTTPWeb3ProviderFlag.Name, flags.ExecutionEngineEndpoint.Name)
|
||||
return c.String(flags.HTTPWeb3ProviderFlag.Name), nil
|
||||
}
|
||||
return c.String(flags.ExecutionEngineEndpoint.Name), nil
|
||||
}
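A compressed view of the flag precedence that parseExecutionChainEndpoint now implements, with plain strings and booleans standing in for the cli context (resolveEndpoint is a hypothetical helper, not part of the diff): the deprecated --http-web3provider value is used only when it was set and the new flag was not.

package main

import (
	"errors"
	"fmt"
)

// resolveEndpoint mirrors the fallback order above: error when no endpoint is
// configured, honor the deprecated alias only if the new flag is unset, and
// otherwise use the execution endpoint flag.
func resolveEndpoint(execution, web3 string, executionSet, web3Set bool) (string, error) {
	if execution == "" && !web3Set {
		return "", errors.New("no execution endpoint configured")
	}
	if web3Set && !executionSet {
		return web3, nil // deprecated flag accepted, with a warning in the real code
	}
	return execution, nil
}

func main() {
	ep, _ := resolveEndpoint("", "http://localhost:8545", false, true)
	fmt.Println(ep) // http://localhost:8545
}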
|
||||
|
||||
@@ -32,6 +32,13 @@ var (
|
||||
Usage: "An execution client http endpoint. Can contain auth header as well in the format",
|
||||
Value: "http://localhost:8551",
|
||||
}
|
||||
// Deprecated: HTTPWeb3ProviderFlag is a deprecated flag and is an alias for the ExecutionEngineEndpoint flag.
|
||||
HTTPWeb3ProviderFlag = &cli.StringFlag{
|
||||
Name: "http-web3provider",
|
||||
Usage: "DEPRECATED: A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
|
||||
Value: "http://localhost:8551",
|
||||
Hidden: true,
|
||||
}
|
||||
// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
|
||||
// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests
|
||||
// to execution nodes for consensus-related calls will fail. This is not required if using an IPC connection.
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
var appFlags = []cli.Flag{
|
||||
flags.DepositContractFlag,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.RPCHost,
|
||||
flags.RPCPort,
|
||||
@@ -231,6 +232,13 @@ func startNode(ctx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
logrus.SetLevel(level)
|
||||
// Set libp2p logger to only panic logs for the info level.
|
||||
golog.SetAllLoggers(golog.LevelPanic)
|
||||
|
||||
if level == logrus.DebugLevel {
|
||||
// Set libp2p logger to error logs for the debug level.
|
||||
golog.SetAllLoggers(golog.LevelError)
|
||||
}
|
||||
if level == logrus.TraceLevel {
|
||||
// libp2p specific logging.
|
||||
golog.SetAllLoggers(golog.LevelDebug)
|
||||
|
||||
@@ -107,6 +107,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.GRPCGatewayPort,
|
||||
flags.GPRCGatewayCorsDomain,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.SetGCPercent,
|
||||
flags.SlotsPerArchivedPoint,
|
||||
|
||||
@@ -96,31 +96,3 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpandWeb3EndpointsIfFile expands the path for --fallback-web3provider if specified as a file.
|
||||
func ExpandWeb3EndpointsIfFile(ctx *cli.Context, flags *cli.StringSliceFlag) error {
|
||||
// Return early if no flag value is set.
|
||||
if !ctx.IsSet(flags.Name) {
|
||||
return nil
|
||||
}
|
||||
rawFlags := ctx.StringSlice(flags.Name)
|
||||
for i, rawValue := range rawFlags {
|
||||
switch {
|
||||
case strings.HasPrefix(rawValue, "http://"):
|
||||
case strings.HasPrefix(rawValue, "https://"):
|
||||
case strings.HasPrefix(rawValue, "ws://"):
|
||||
case strings.HasPrefix(rawValue, "wss://"):
|
||||
default:
|
||||
web3endpoint, err := file.ExpandPath(rawValue)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not expand path for %s", rawValue)
|
||||
}
|
||||
// Given that rawFlags is a pointer this will replace the unexpanded path
|
||||
// with the expanded one. Also there is no easy way to replace the string
|
||||
// slice flag value compared to other flag types. This is why we resort to
|
||||
// replacing it like this.
|
||||
rawFlags[i] = web3endpoint
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -111,43 +111,3 @@ func TestExpandSingleEndpointIfFile(t *testing.T) {
|
||||
require.NoError(t, ExpandSingleEndpointIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.Equal(t, curentdir+"/path.ipc", context.String(HTTPWeb3ProviderFlag.Name))
|
||||
}
|
||||
|
||||
func TestExpandWeb3EndpointsIfFile(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
HTTPWeb3ProviderFlag := &cli.StringSliceFlag{Name: "fallback-web3provider", Value: cli.NewStringSlice()}
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context := cli.NewContext(&app, set, nil)
|
||||
// with nothing set
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// with url scheme
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "http://localhost:8545"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{"http://localhost:8545"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// reset context
|
||||
set = flag.NewFlagSet("test", 0)
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context = cli.NewContext(&app, set, nil)
|
||||
|
||||
// relative user home path
|
||||
usr, err := user.Current()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "~/relative/path.ipc"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{usr.HomeDir + "/relative/path.ipc"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// reset context
|
||||
set = flag.NewFlagSet("test", 0)
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context = cli.NewContext(&app, set, nil)
|
||||
|
||||
// current dir path
|
||||
curentdir, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "./path.ipc"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{curentdir + "/path.ipc"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
@@ -340,16 +341,17 @@ var (
|
||||
|
||||
// EnableBuilderFlag enables the periodic validator registration API calls that will update the custom builder with validator settings.
|
||||
EnableBuilderFlag = &cli.BoolFlag{
|
||||
Name: "enable-builder",
|
||||
Usage: "Enables Builder validator registration APIs for the validator client to update settings such as fee recipient and gas limit. Note* this flag is not required if using proposer settings config file",
|
||||
Value: false,
|
||||
Name: "enable-builder",
|
||||
Usage: "Enables Builder validator registration APIs for the validator client to update settings such as fee recipient and gas limit. Note* this flag is not required if using proposer settings config file",
|
||||
Value: false,
|
||||
Aliases: []string{"enable-validator-registration"},
|
||||
}
|
||||
|
||||
// BuilderGasLimitFlag defines the gas limit for the builder to use for constructing a payload.
|
||||
BuilderGasLimitFlag = &cli.IntFlag{
|
||||
BuilderGasLimitFlag = &cli.StringFlag{
|
||||
Name: "suggested-gas-limit",
|
||||
Usage: "Sets gas limit for the builder to use for constructing a payload for all the validators",
|
||||
Value: int(params.BeaconConfig().DefaultBuilderGasLimit),
|
||||
Value: fmt.Sprint(params.BeaconConfig().DefaultBuilderGasLimit),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"edit.go",
|
||||
"recover.go",
|
||||
"wallet.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/validator/wallet",
|
||||
@@ -12,6 +13,7 @@ go_library(
|
||||
"//cmd:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//io/prompt:go_default_library",
|
||||
"//runtime/tos:go_default_library",
|
||||
"//validator/accounts:go_default_library",
|
||||
"//validator/accounts/userprompt:go_default_library",
|
||||
@@ -20,13 +22,18 @@ go_library(
|
||||
"//validator/keymanager/remote:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_tyler_smith_go_bip39//:go_default_library",
|
||||
"@com_github_tyler_smith_go_bip39//wordlists:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["edit_test.go"],
|
||||
srcs = [
|
||||
"edit_test.go",
|
||||
"recover_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
@@ -34,8 +41,10 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//validator/accounts:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
"//validator/accounts/wallet:go_default_library",
|
||||
"//validator/keymanager:go_default_library",
|
||||
"//validator/keymanager/derived:go_default_library",
|
||||
"//validator/keymanager/remote:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
|
||||
cmd/validator/wallet/recover.go (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
package wallet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/io/prompt"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/userprompt"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/wallet"
|
||||
"github.com/tyler-smith/go-bip39"
|
||||
"github.com/tyler-smith/go-bip39/wordlists"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// #nosec G101 -- Not sensitive data
|
||||
mnemonicPassphraseYesNoText = "(Advanced) Do you have an optional '25th word' passphrase for your mnemonic? [y/n]"
|
||||
// #nosec G101 -- Not sensitive data
|
||||
mnemonicPassphrasePromptText = "(Advanced) Enter the '25th word' passphrase for your mnemonic"
|
||||
)
|
||||
|
||||
func walletRecover(c *cli.Context) error {
|
||||
mnemonic, err := inputMnemonic(c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get mnemonic phrase")
|
||||
}
|
||||
opts := []accounts.Option{
|
||||
accounts.WithMnemonic(mnemonic),
|
||||
}
|
||||
|
||||
skipMnemonic25thWord := c.IsSet(flags.SkipMnemonic25thWordCheckFlag.Name)
|
||||
has25thWordFile := c.IsSet(flags.Mnemonic25thWordFileFlag.Name)
|
||||
if !skipMnemonic25thWord && !has25thWordFile {
|
||||
resp, err := prompt.ValidatePrompt(
|
||||
os.Stdin, mnemonicPassphraseYesNoText, prompt.ValidateYesOrNo,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate choice")
|
||||
}
|
||||
if strings.EqualFold(resp, "y") {
|
||||
mnemonicPassphrase, err := prompt.InputPassword(
|
||||
c,
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
mnemonicPassphrasePromptText,
|
||||
"Confirm mnemonic passphrase",
|
||||
false, /* Should confirm password */
|
||||
func(input string) error {
|
||||
if strings.TrimSpace(input) == "" {
|
||||
return errors.New("input cannot be empty")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, accounts.WithMnemonic25thWord(mnemonicPassphrase))
|
||||
}
|
||||
}
|
||||
walletDir, err := userprompt.InputDirectory(c, userprompt.WalletDirPromptText, flags.WalletDirFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
walletPassword, err := prompt.InputPassword(
|
||||
c,
|
||||
flags.WalletPasswordFileFlag,
|
||||
wallet.NewWalletPasswordPromptText,
|
||||
wallet.ConfirmPasswordPromptText,
|
||||
true, /* Should confirm password */
|
||||
prompt.ValidatePasswordInput,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numAccounts, err := inputNumAccounts(c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get number of accounts to recover")
|
||||
}
|
||||
opts = append(opts, accounts.WithWalletDir(walletDir))
|
||||
opts = append(opts, accounts.WithWalletPassword(walletPassword))
|
||||
opts = append(opts, accounts.WithNumAccounts(int(numAccounts)))
|
||||
|
||||
acc, err := accounts.NewCLIManager(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = acc.WalletRecover(c.Context); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof(
|
||||
"Successfully recovered HD wallet with accounts and saved configuration to disk",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func inputMnemonic(cliCtx *cli.Context) (mnemonicPhrase string, err error) {
|
||||
if cliCtx.IsSet(flags.MnemonicFileFlag.Name) {
|
||||
mnemonicFilePath := cliCtx.String(flags.MnemonicFileFlag.Name)
|
||||
data, err := os.ReadFile(mnemonicFilePath) // #nosec G304 -- ReadFile is safe
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
enteredMnemonic := string(data)
|
||||
if err := accounts.ValidateMnemonic(enteredMnemonic); err != nil {
|
||||
return "", errors.Wrap(err, "mnemonic phrase did not pass validation")
|
||||
}
|
||||
return enteredMnemonic, nil
|
||||
}
|
||||
allowedLanguages := map[string][]string{
|
||||
"chinese_simplified": wordlists.ChineseSimplified,
|
||||
"chinese_traditional": wordlists.ChineseTraditional,
|
||||
"czech": wordlists.Czech,
|
||||
"english": wordlists.English,
|
||||
"french": wordlists.French,
|
||||
"japanese": wordlists.Japanese,
|
||||
"korean": wordlists.Korean,
|
||||
"italian": wordlists.Italian,
|
||||
"spanish": wordlists.Spanish,
|
||||
}
|
||||
languages := make([]string, 0)
|
||||
for k := range allowedLanguages {
|
||||
languages = append(languages, k)
|
||||
}
|
||||
sort.Strings(languages)
|
||||
selectedLanguage, err := prompt.ValidatePrompt(
|
||||
os.Stdin,
|
||||
fmt.Sprintf("Enter the language of your seed phrase: %s", strings.Join(languages, ", ")),
|
||||
func(input string) error {
|
||||
if _, ok := allowedLanguages[input]; !ok {
|
||||
return errors.New("input not in the list of allowed languages")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get mnemonic language: %w", err)
|
||||
}
|
||||
bip39.SetWordList(allowedLanguages[selectedLanguage])
|
||||
mnemonicPhrase, err = prompt.ValidatePrompt(
|
||||
os.Stdin,
|
||||
"Enter the seed phrase for the wallet you would like to recover",
|
||||
accounts.ValidateMnemonic)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get mnemonic phrase: %w", err)
|
||||
}
|
||||
return mnemonicPhrase, nil
|
||||
}
|
||||
|
||||
func inputNumAccounts(cliCtx *cli.Context) (int64, error) {
|
||||
if cliCtx.IsSet(flags.NumAccountsFlag.Name) {
|
||||
numAccounts := cliCtx.Int64(flags.NumAccountsFlag.Name)
|
||||
if numAccounts <= 0 {
|
||||
return 0, errors.New("must recover at least 1 account")
|
||||
}
|
||||
return numAccounts, nil
|
||||
}
|
||||
numAccounts, err := prompt.ValidatePrompt(os.Stdin, "Enter how many accounts you would like to generate from the mnemonic", prompt.ValidateNumber)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
numAccountsInt, err := strconv.Atoi(numAccounts)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if numAccountsInt <= 0 {
|
||||
return 0, errors.New("must recover at least 1 account")
|
||||
}
|
||||
return int64(numAccountsInt), nil
|
||||
}
|
||||
cmd/validator/wallet/recover_test.go (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
package wallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/iface"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/wallet"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/keymanager"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/keymanager/derived"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
walletDirName = "wallet"
|
||||
mnemonicFileName = "mnemonic.txt"
|
||||
mnemonic = "garage car helmet trade salmon embrace market giant movie wet same champion dawn chair shield drill amazing panther accident puzzle garden mosquito kind arena"
|
||||
)
|
||||
|
||||
type recoverCfgStruct struct {
|
||||
walletDir string
|
||||
passwordFilePath string
|
||||
mnemonicFilePath string
|
||||
numAccounts int64
|
||||
}
|
||||
|
||||
func setupRecoverCfg(t *testing.T) *recoverCfgStruct {
|
||||
testDir := t.TempDir()
|
||||
walletDir := filepath.Join(testDir, walletDirName)
|
||||
passwordFilePath := filepath.Join(testDir, passwordFileName)
|
||||
require.NoError(t, os.WriteFile(passwordFilePath, []byte(password), os.ModePerm))
|
||||
mnemonicFilePath := filepath.Join(testDir, mnemonicFileName)
|
||||
require.NoError(t, os.WriteFile(mnemonicFilePath, []byte(mnemonic), os.ModePerm))
|
||||
|
||||
return &recoverCfgStruct{
|
||||
walletDir: walletDir,
|
||||
passwordFilePath: passwordFilePath,
|
||||
mnemonicFilePath: mnemonicFilePath,
|
||||
}
|
||||
}
|
||||
|
||||
func createRecoverCliCtx(t *testing.T, cfg *recoverCfgStruct) *cli.Context {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String(flags.WalletDirFlag.Name, cfg.walletDir, "")
|
||||
set.String(flags.WalletPasswordFileFlag.Name, cfg.passwordFilePath, "")
|
||||
set.String(flags.KeymanagerKindFlag.Name, keymanager.Derived.String(), "")
|
||||
set.String(flags.MnemonicFileFlag.Name, cfg.mnemonicFilePath, "")
|
||||
set.Bool(flags.SkipMnemonic25thWordCheckFlag.Name, true, "")
|
||||
set.Int64(flags.NumAccountsFlag.Name, cfg.numAccounts, "")
|
||||
assert.NoError(t, set.Set(flags.SkipMnemonic25thWordCheckFlag.Name, "true"))
|
||||
assert.NoError(t, set.Set(flags.WalletDirFlag.Name, cfg.walletDir))
|
||||
assert.NoError(t, set.Set(flags.WalletPasswordFileFlag.Name, cfg.passwordFilePath))
|
||||
assert.NoError(t, set.Set(flags.KeymanagerKindFlag.Name, keymanager.Derived.String()))
|
||||
assert.NoError(t, set.Set(flags.MnemonicFileFlag.Name, cfg.mnemonicFilePath))
|
||||
assert.NoError(t, set.Set(flags.NumAccountsFlag.Name, strconv.Itoa(int(cfg.numAccounts))))
|
||||
return cli.NewContext(&app, set, nil)
|
||||
}
|
||||
|
||||
func TestRecoverDerivedWallet(t *testing.T) {
|
||||
cfg := setupRecoverCfg(t)
|
||||
cfg.numAccounts = 4
|
||||
cliCtx := createRecoverCliCtx(t, cfg)
|
||||
require.NoError(t, walletRecover(cliCtx))
|
||||
|
||||
ctx := context.Background()
|
||||
w, err := wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: cfg.walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
derivedKM, ok := km.(*derived.Keymanager)
|
||||
if !ok {
|
||||
t.Fatal("not a derived keymanager")
|
||||
}
|
||||
names, err := derivedKM.ValidatingAccountNames(ctx)
|
||||
assert.NoError(t, err)
|
||||
require.Equal(t, len(names), int(cfg.numAccounts))
|
||||
}
|
||||
|
||||
// TestRecoverDerivedWallet_OneAccount is a test for regression in cases where the number of accounts recovered is 1
|
||||
func TestRecoverDerivedWallet_OneAccount(t *testing.T) {
|
||||
cfg := setupRecoverCfg(t)
|
||||
cfg.numAccounts = 1
|
||||
cliCtx := createRecoverCliCtx(t, cfg)
|
||||
require.NoError(t, walletRecover(cliCtx))
|
||||
|
||||
_, err := wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: cfg.walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRecoverDerivedWallet_AlreadyExists(t *testing.T) {
|
||||
cfg := setupRecoverCfg(t)
|
||||
cfg.numAccounts = 4
|
||||
cliCtx := createRecoverCliCtx(t, cfg)
|
||||
require.NoError(t, walletRecover(cliCtx))
|
||||
|
||||
// Trying to recover an HD wallet into a directory that already exists should give an error
|
||||
require.ErrorContains(t, "a wallet already exists at this location", walletRecover(cliCtx))
|
||||
}
|
||||
@@ -108,13 +108,13 @@ var Commands = &cli.Command{
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
return tos.VerifyTosAcceptedOrPrompt(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := accounts.RecoverWalletCli(cliCtx); err != nil {
|
||||
return features.ConfigureBeaconChain(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := walletRecover(cliCtx); err != nil {
|
||||
log.WithError(err).Fatal("Could not recover wallet")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -21,10 +21,12 @@ package features
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/gohashtree"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -64,6 +66,7 @@ type Flags struct {
|
||||
DisableForkchoiceDoublyLinkedTree bool // DisableForkChoiceDoublyLinkedTree specifies whether fork choice store will use a doubly linked tree.
|
||||
EnableBatchGossipAggregation bool // EnableBatchGossipAggregation specifies whether to further aggregate our gossip batches before verifying them.
|
||||
EnableOnlyBlindedBeaconBlocks bool // EnableOnlyBlindedBeaconBlocks enables only storing blinded beacon blocks in the DB post-Bellatrix fork.
|
||||
EnableStartOptimistic bool // EnableStartOptimistic treats every block as optimistic at startup.
|
||||
|
||||
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
|
||||
// changed on disk. This feature is for advanced use cases only.
|
||||
@@ -209,7 +212,22 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
if ctx.Bool(disableVecHTR.Name) {
|
||||
logEnabled(disableVecHTR)
|
||||
} else {
|
||||
applyVectorizedHTRConfig(cfg)
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, syscall.SIGILL)
|
||||
defer signal.Stop(sigc)
|
||||
buffer := make([][32]byte, 2)
|
||||
err := gohashtree.Hash(buffer, buffer)
|
||||
if err != nil {
|
||||
log.Error("could not test if gohashtree is supported")
|
||||
} else {
|
||||
t := time.NewTimer(time.Millisecond * 100)
|
||||
select {
|
||||
case <-sigc:
|
||||
log.Error("gohashtree is not supported in this CPU")
|
||||
case <-t.C:
|
||||
cfg.EnableVectorizedHTR = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if ctx.Bool(disableForkChoiceDoublyLinkedTree.Name) {
|
||||
logEnabled(disableForkChoiceDoublyLinkedTree)
|
||||
@@ -224,6 +242,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(EnableOnlyBlindedBeaconBlocks)
|
||||
cfg.EnableOnlyBlindedBeaconBlocks = true
|
||||
}
|
||||
if ctx.Bool(enableStartupOptimistic.Name) {
|
||||
logEnabled(enableStartupOptimistic)
|
||||
cfg.EnableStartOptimistic = true
|
||||
}
|
||||
Init(cfg)
|
||||
return nil
|
||||
}
|
||||
@@ -299,28 +321,3 @@ func logDisabled(flag cli.DocGenerationFlag) {
|
||||
}
|
||||
log.WithField(name, flag.GetUsage()).Warn(disabledFeatureFlag)
|
||||
}
|
||||
|
||||
func applyVectorizedHTRConfig(cfg *Flags) (appliedCfg *Flags) {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
log.Error("gohashtree is not supported in this CPU")
|
||||
}
|
||||
}()
|
||||
appliedCfg = cfg
|
||||
buffer := make([][32]byte, 2)
|
||||
err := mockSIGILL(buffer, buffer)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not test if gohashtree is supported")
|
||||
return
|
||||
}
|
||||
appliedCfg.EnableVectorizedHTR = true
|
||||
return
|
||||
}
|
||||
|
||||
func mockSIGILL(_, _ [][32]byte) error {
|
||||
p, err := os.FindProcess(os.Getpid())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Signal(syscall.SIGILL)
|
||||
}
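The inline probe added to ConfigureBeaconChain above replaces the removed applyVectorizedHTRConfig/mockSIGILL helpers: it runs one gohashtree hash with a SIGILL handler installed and only enables vectorized HTR if no illegal-instruction signal arrives within 100ms. A standalone sketch of that pattern, reusing only the calls that appear in the hunks above (supportsVectorizedHTR itself is illustrative, not an exported Prysm API):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/prysmaticlabs/gohashtree"
)

// supportsVectorizedHTR reports whether the gohashtree SIMD path appears to run
// on this CPU, using the same SIGILL-and-timeout probe as ConfigureBeaconChain.
func supportsVectorizedHTR() bool {
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGILL)
	defer signal.Stop(sigc)

	buffer := make([][32]byte, 2)
	if err := gohashtree.Hash(buffer, buffer); err != nil {
		return false // probe could not run at all
	}
	select {
	case <-sigc:
		return false // CPU raised SIGILL on the vectorized instructions
	case <-time.After(100 * time.Millisecond):
		return true
	}
}

func main() {
	fmt.Println(supportsVectorizedHTR())
}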
|
||||
|
||||
@@ -12,8 +12,84 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedBackupWebHookFlag = &cli.BoolFlag{
|
||||
Name: "enable-db-backup-webhook",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedBoltMmapFlag = &cli.StringFlag{
|
||||
Name: "bolt-mmap-initial-size",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDisableDiscV5Flag = &cli.BoolFlag{
|
||||
Name: "disable-discv5",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDisableAttHistoryCacheFlag = &cli.BoolFlag{
|
||||
Name: "disable-attesting-history-db-cache",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableVectorizedHtr = &cli.BoolFlag{
|
||||
Name: "enable-vectorized-htr",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnablePeerScorer = &cli.BoolFlag{
|
||||
Name: "enable-peer-scorer",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableForkchoiceDoublyLinkedTree = &cli.BoolFlag{
|
||||
Name: "enable-forkchoice-doubly-linked-tree",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDutyCountdown = &cli.BoolFlag{
|
||||
Name: "enable-duty-count-down",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedHeadSync = &cli.BoolFlag{
|
||||
Name: "head-sync",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedGossipBatchAggregation = &cli.BoolFlag{
|
||||
Name: "enable-gossip-batch-aggregation",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableLargerGossipHistory = &cli.BoolFlag{
|
||||
Name: "enable-larger-gossip-history",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedFallbackProvider = &cli.StringFlag{
|
||||
Name: "fallback-web3provider",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// Deprecated flags for both the beacon node and validator client.
|
||||
var deprecatedFlags = []cli.Flag{
|
||||
exampleDeprecatedFeatureFlag,
|
||||
deprecatedBoltMmapFlag,
|
||||
deprecatedDisableDiscV5Flag,
|
||||
deprecatedDisableAttHistoryCacheFlag,
|
||||
deprecatedEnableVectorizedHtr,
|
||||
deprecatedEnablePeerScorer,
|
||||
deprecatedEnableForkchoiceDoublyLinkedTree,
|
||||
deprecatedDutyCountdown,
|
||||
deprecatedHeadSync,
|
||||
deprecatedGossipBatchAggregation,
|
||||
deprecatedEnableLargerGossipHistory,
|
||||
deprecatedFallbackProvider,
|
||||
}
|
||||
|
||||
var deprecatedBeaconFlags = []cli.Flag{
|
||||
deprecatedBackupWebHookFlag,
|
||||
}
|
||||
|
||||
@@ -113,6 +113,12 @@ var (
|
||||
Name: "enable-only-blinded-beacon-blocks",
|
||||
Usage: "Enables storing only blinded beacon blocks in the database without full execution layer transactions",
|
||||
}
|
||||
enableStartupOptimistic = &cli.BoolFlag{
|
||||
Name: "startup-optimistic",
|
||||
Usage: "Treats every block as optimistically synced at launch. Use with caution",
|
||||
Value: false,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
@@ -138,7 +144,7 @@ var E2EValidatorFlags = []string{
|
||||
}
|
||||
|
||||
// BeaconChainFlags contains a list of all the feature flags that apply to the beacon-chain client.
|
||||
var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []cli.Flag{
|
||||
devModeFlag,
|
||||
writeSSZStateTransitionsFlag,
|
||||
disableGRPCConnectionLogging,
|
||||
@@ -156,7 +162,8 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
disableForkChoiceDoublyLinkedTree,
|
||||
disableGossipBatchAggregation,
|
||||
EnableOnlyBlindedBeaconBlocks,
|
||||
}...)
|
||||
enableStartupOptimistic,
|
||||
}...)...)
|
||||
|
||||
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
|
||||
var E2EBeaconChainFlags = []string{
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package validator_service_config
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
@@ -25,10 +27,41 @@ type ProposerOptionPayload struct {
|
||||
// GasLimit is a number set to help the network decide on the maximum gas in each block.
|
||||
type BuilderConfig struct {
|
||||
Enabled bool `json:"enabled" yaml:"enabled"`
|
||||
GasLimit uint64 `json:"gas_limit,omitempty" yaml:"gas_limit,omitempty"`
|
||||
GasLimit Uint64 `json:"gas_limit,omitempty" yaml:"gas_limit,omitempty"`
|
||||
Relays []string `json:"relays" yaml:"relays"`
|
||||
}
|
||||
|
||||
type Uint64 uint64
|
||||
|
||||
func (u *Uint64) UnmarshalJSON(bs []byte) error {
|
||||
str := string(bs) // Parse plain numbers directly.
|
||||
if bs[0] == '"' && bs[len(bs)-1] == '"' {
|
||||
// Unwrap the quotes from string numbers.
|
||||
str = string(bs[1 : len(bs)-1])
|
||||
}
|
||||
x, err := strconv.ParseUint(str, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Uint64(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uint64) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var str string
|
||||
err := unmarshal(&str)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
x, err := strconv.ParseUint(str, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Uint64(x)
|
||||
|
||||
return nil
|
||||
}
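A short usage sketch for the Uint64 type above: both a bare JSON number and a quoted one decode to the same value, which is what lets gas_limit be written either way in a proposer settings file (the struct and the 30000000 value are illustrative; assume the Uint64 type above is in scope):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var a, b struct {
		GasLimit Uint64 `json:"gas_limit"` // Uint64 as defined in the hunk above
	}
	// Plain JSON number.
	_ = json.Unmarshal([]byte(`{"gas_limit": 30000000}`), &a)
	// The same value given as a string.
	_ = json.Unmarshal([]byte(`{"gas_limit": "30000000"}`), &b)
	fmt.Println(a.GasLimit == b.GasLimit) // true
}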
|
||||
|
||||
// ProposerSettings is a Prysm internal representation of the fee recipient config on the validator client.
|
||||
// ProposerSettingsPayload maps to ProposerSettings on import through the CLI.
|
||||
type ProposerSettings struct {
|
||||
|
||||
@@ -25,7 +25,7 @@ func (b *SignedBeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
block, ok = blockMessage.(*eth.BeaconBlock)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBlockVersion)
|
||||
return nil, errIncorrectBlockVersion
|
||||
}
|
||||
}
|
||||
return &eth.SignedBeaconBlock{
|
||||
@@ -38,7 +38,7 @@ func (b *SignedBeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
block, ok = blockMessage.(*eth.BeaconBlockAltair)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBlockVersion)
|
||||
return nil, errIncorrectBlockVersion
|
||||
}
|
||||
}
|
||||
return &eth.SignedBeaconBlockAltair{
|
||||
@@ -52,7 +52,7 @@ func (b *SignedBeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
block, ok = blockMessage.(*eth.BlindedBeaconBlockBellatrix)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBlockVersion)
|
||||
return nil, errIncorrectBlockVersion
|
||||
}
|
||||
}
|
||||
return &eth.SignedBlindedBeaconBlockBellatrix{
|
||||
@@ -65,7 +65,7 @@ func (b *SignedBeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
block, ok = blockMessage.(*eth.BeaconBlockBellatrix)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBlockVersion)
|
||||
return nil, errIncorrectBlockVersion
|
||||
}
|
||||
}
|
||||
return &eth.SignedBeaconBlockBellatrix{
|
||||
@@ -95,7 +95,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
body, ok = bodyMessage.(*eth.BeaconBlockBody)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBodyVersion)
|
||||
return nil, errIncorrectBodyVersion
|
||||
}
|
||||
}
|
||||
return &eth.BeaconBlock{
|
||||
@@ -111,7 +111,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
body, ok = bodyMessage.(*eth.BeaconBlockBodyAltair)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBodyVersion)
|
||||
return nil, errIncorrectBodyVersion
|
||||
}
|
||||
}
|
||||
return &eth.BeaconBlockAltair{
|
||||
@@ -128,7 +128,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
body, ok = bodyMessage.(*eth.BlindedBeaconBlockBodyBellatrix)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBodyVersion)
|
||||
return nil, errIncorrectBodyVersion
|
||||
}
|
||||
}
|
||||
return &eth.BlindedBeaconBlockBellatrix{
|
||||
@@ -144,7 +144,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) {
|
||||
var ok bool
|
||||
body, ok = bodyMessage.(*eth.BeaconBlockBodyBellatrix)
|
||||
if !ok {
|
||||
return nil, errors.Wrap(err, incorrectBodyVersion)
|
||||
return nil, errIncorrectBodyVersion
|
||||
}
|
||||
}
|
||||
return &eth.BeaconBlockBellatrix{
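The repeated substitution above, from errors.Wrap(err, incorrectBlockVersion) to the errIncorrectBlockVersion sentinel, most likely fixes a silent-nil problem: github.com/pkg/errors.Wrap returns nil when the wrapped error is nil, and err is already nil by the time the type assertion is what failed, so callers previously saw no error at all. A minimal sketch of the pitfall (the sentinel declared here is illustrative):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errIncorrectBlockVersion = errors.New("incorrect beacon block version")

func main() {
	var err error // nil by the time a failed type assertion is detected
	wrapped := errors.Wrap(err, "incorrect beacon block version")
	fmt.Println(wrapped == nil)                  // true: the failure is swallowed
	fmt.Println(errIncorrectBlockVersion != nil) // true: the sentinel always surfaces it
}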
|
||||
|
||||
hack/generate-release.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
#! /usr/bin/env bash
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
BUILDKITE_TAG=$(git rev-parse --verify HEAD)
|
||||
OUTDIR=/tmp/$BUILDKITE_TAG
|
||||
mkdir -p $OUTDIR
|
||||
|
||||
declare -a configs=("--config=linux_amd64 --config=llvm" "--config=release --config=linux_amd64 --define=blst_modern=true" "--config=release --config=linux_amd64 --define=blst_modern=true" "--config=osx_amd64_docker" "--config=osx_arm64_docker" "--config=windows_amd64_docker")
|
||||
declare -a targetsuffix=("$BUILDKITE_TAG-linux-amd64" "$BUILDKITE_TAG-modern-linux-amd64" "$BUILDKITE_TAG-linux-arm64" "$BUILDKITE_TAG-darwin-amd64" "$BUILDKITE_TAG-darwin-arm64" "$BUILDKITE_TAG-windows-amd64.exe")
|
||||
|
||||
bazel query 'kind(rule, //cmd/...:*)' --output label_kind --logging=0 2>/dev/null | grep go_binary | tr -s ' ' | cut -d' ' -f3 | while read target ; do
|
||||
for i in "${!configs[@]}"
|
||||
do
|
||||
bname=$(echo $target | cut -d':' -f2)
|
||||
cfg="${configs[$i]}"
|
||||
suff="${targetsuffix[$i]}"
|
||||
echo "bazel build --config=release $cfg $target"
|
||||
output=$(bazel cquery $target --output starlark --starlark:file=tools/cquery/format-out/output.cquery 2>/dev/null)
|
||||
fname=$OUTDIR/$bname-$suff
|
||||
echo "cp $output $fname"
|
||||
pushd $OUTDIR > /dev/null
|
||||
echo "sha256sum $fname > $fname.sha256"
|
||||
echo "gpg -o $fname.sig --sign --detach-sig $fname"
|
||||
popd > /dev/null
|
||||
echo "$SCRIPT_DIR/../hack/upload-github-release-asset.sh github_api_token=$TOKEN owner=prysmaticlabs repo=prysm tag=$BUILDKITE_TAG filename=$fname"
|
||||
done
|
||||
done
|
||||
echo "gsutil -m cp -a public-read /tmp/validator-$BUILDKITE_TAG-* gs://prysmaticlabs.com/releases/"
|
||||
echo "gsutil -m cp -a public-read /tmp/beacon-chain-$BUILDKITE_TAG-* gs://prysmaticlabs.com/releases/"
|
||||
echo "gsutil -m cp -a public-read /tmp/client-stats-$BUILDKITE_TAG-* gs://prysmaticlabs.com/releases/"
|
||||
echo $BUILDKITE_TAG > /tmp/latest
|
||||
echo "gsutil -h "Cache-Control:no-cache,max-age=0" -h "Content-Type:text/html;charset=UTF-8" cp -a public-read /tmp/latest gs://prysmaticlabs.com/releases/latest"
|
||||
echo "gsutil -m acl ch -u AllUsers:R gs://prysmaticlabs.com/releases/*"
|
||||
echo "./hack/tag-versioned-docker-images.sh "
|
||||
echo "./hack/tag-versioned-docker-images.sh -s"
|
||||
|
||||
@@ -37,12 +37,17 @@ func NewBuilder(t testing.TB, initialState state.BeaconState, initialBlock inter
|
||||
// Tick resets the genesis time to now()-tick and adjusts the slot to the appropriate value.
|
||||
func (bb *Builder) Tick(t testing.TB, tick int64) {
|
||||
bb.service.SetGenesisTime(time.Unix(time.Now().Unix()-tick, 0))
|
||||
bb.service.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix() - tick))
|
||||
if tick > bb.lastTick {
|
||||
slot := uint64(tick) / params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, bb.service.ForkChoicer().NewSlot(context.TODO(), types.Slot(slot)))
|
||||
bb.lastTick = tick
|
||||
lastSlot := uint64(bb.lastTick) / params.BeaconConfig().SecondsPerSlot
|
||||
currentSlot := uint64(tick) / params.BeaconConfig().SecondsPerSlot
|
||||
for lastSlot < currentSlot {
|
||||
lastSlot++
|
||||
bb.service.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix() - int64(params.BeaconConfig().SecondsPerSlot*lastSlot)))
|
||||
require.NoError(t, bb.service.ForkChoicer().NewSlot(context.TODO(), types.Slot(lastSlot)))
|
||||
}
|
||||
if tick > int64(params.BeaconConfig().SecondsPerSlot*lastSlot) {
|
||||
bb.service.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix() - tick))
|
||||
}
|
||||
bb.lastTick = tick
|
||||
}
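The reworked Tick above walks forkchoice forward one slot at a time instead of jumping straight to the tick's slot. A tiny worked example of the slot arithmetic it relies on (12-second slots are the mainnet SecondsPerSlot; the tick values are only illustrative):

package main

import "fmt"

func main() {
	secondsPerSlot := uint64(12)
	lastTick, tick := uint64(5), uint64(26)
	lastSlot, currentSlot := lastTick/secondsPerSlot, tick/secondsPerSlot
	// Tick above calls ForkChoicer().NewSlot once for every slot crossed.
	for lastSlot < currentSlot {
		lastSlot++
		fmt.Println("NewSlot", lastSlot) // prints: NewSlot 1, then NewSlot 2
	}
}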
|
||||
|
||||
// block returns the block root.
|
||||
|
||||
tools/cquery/format-out/output.cquery (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
# via https://stackoverflow.com/questions/52729104/how-do-i-get-output-files-for-a-given-bazel-target
|
||||
def format(target):
|
||||
outputs = target.files.to_list()
|
||||
return outputs[0].path if len(outputs) > 0 else "(missing)"
|
||||
|
||||
@@ -52,8 +52,6 @@ go_library(
|
||||
"@com_github_manifoldco_promptui//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_tyler_smith_go_bip39//:go_default_library",
|
||||
"@com_github_tyler_smith_go_bip39//wordlists:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
|
||||
@@ -25,7 +25,7 @@ func NewCLIManager(opts ...Option) (*AccountsCLIManager, error) {
|
||||
}
|
||||
|
||||
// AccountsCLIManager defines a struct capable of performing various validator
|
||||
// wallet account operations via the command line.
|
||||
// wallet & account operations via the command line.
|
||||
type AccountsCLIManager struct {
|
||||
wallet *wallet.Wallet
|
||||
keymanager keymanager.IKeymanager
|
||||
@@ -48,6 +48,11 @@ type AccountsCLIManager struct {
|
||||
filteredPubKeys []bls.PublicKey
|
||||
rawPubKeys [][]byte
|
||||
formattedPubKeys []string
|
||||
walletDir string
|
||||
walletPassword string
|
||||
mnemonic string
|
||||
numAccounts int
|
||||
mnemonic25thWord string
|
||||
}
|
||||
|
||||
func (acm *AccountsCLIManager) prepareBeaconClients(ctx context.Context) (*ethpb.BeaconNodeValidatorClient, *ethpb.NodeClient, error) {
|
||||
|
||||
@@ -178,3 +178,43 @@ func WithFormattedPubKeys(formattedPubKeys []string) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithWalletDir specifies the directory of the wallet to operate on.
|
||||
func WithWalletDir(walletDir string) Option {
|
||||
return func(acc *AccountsCLIManager) error {
|
||||
acc.walletDir = walletDir
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithWalletPassword specifies the wallet password.
|
||||
func WithWalletPassword(walletPassword string) Option {
|
||||
return func(acc *AccountsCLIManager) error {
|
||||
acc.walletPassword = walletPassword
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMnemonic specifies the mnemonic phrase to recover accounts from.
|
||||
func WithMnemonic(mnemonic string) Option {
|
||||
return func(acc *AccountsCLIManager) error {
|
||||
acc.mnemonic = mnemonic
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMnemonic25thWord specifies the optional 25th-word passphrase for the mnemonic.
|
||||
func WithMnemonic25thWord(mnemonic25thWord string) Option {
|
||||
return func(acc *AccountsCLIManager) error {
|
||||
acc.mnemonic25thWord = mnemonic25thWord
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNumAccounts specifies the number of accounts to recover.
|
||||
func WithNumAccounts(numAccounts int) Option {
|
||||
return func(acc *AccountsCLIManager) error {
|
||||
acc.numAccounts = numAccounts
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/manifoldco/promptui"
|
||||
@@ -22,6 +23,14 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// #nosec G101 -- Not sensitive data
|
||||
newMnemonicPassphraseYesNoText = "(Advanced) Do you want to setup a '25th word' passphrase for your mnemonic? [y/n]"
|
||||
// #nosec G101 -- Not sensitive data
|
||||
newMnemonicPassphrasePromptText = "(Advanced) Setup a passphrase '25th word' for your mnemonic " +
|
||||
"(WARNING: You cannot recover your keys from your mnemonic if you forget this passphrase!)"
|
||||
)
|
||||
|
||||
// CreateWalletConfig defines the parameters needed to call the create wallet functions.
|
||||
type CreateWalletConfig struct {
|
||||
SkipMnemonicConfirm bool
|
||||
@@ -278,3 +287,27 @@ func inputKeymanagerKind(cliCtx *cli.Context) (keymanager.Kind, error) {
|
||||
}
|
||||
return keymanager.Kind(selection), nil
|
||||
}
|
||||
|
||||
// TODO(mikeneuder): Remove duplicate function when migrating wallet create
|
||||
// to cmd/validator/wallet.
|
||||
func inputNumAccounts(cliCtx *cli.Context) (int64, error) {
|
||||
if cliCtx.IsSet(flags.NumAccountsFlag.Name) {
|
||||
numAccounts := cliCtx.Int64(flags.NumAccountsFlag.Name)
|
||||
if numAccounts <= 0 {
|
||||
return 0, errors.New("must recover at least 1 account")
|
||||
}
|
||||
return numAccounts, nil
|
||||
}
|
||||
numAccounts, err := prompt.ValidatePrompt(os.Stdin, "Enter how many accounts you would like to generate from the mnemonic", prompt.ValidateNumber)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
numAccountsInt, err := strconv.Atoi(numAccounts)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if numAccountsInt <= 0 {
|
||||
return 0, errors.New("must recover at least 1 account")
|
||||
}
|
||||
return int64(numAccountsInt), nil
|
||||
}
|
||||
|
||||
@@ -2,35 +2,16 @@ package accounts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/io/prompt"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/userprompt"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/accounts/wallet"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/keymanager"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/keymanager/derived"
|
||||
"github.com/tyler-smith/go-bip39"
|
||||
"github.com/tyler-smith/go-bip39/wordlists"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
phraseWordCount = 24
|
||||
// #nosec G101 -- Not sensitive data
|
||||
newMnemonicPassphraseYesNoText = "(Advanced) Do you want to setup a '25th word' passphrase for your mnemonic? [y/n]"
|
||||
// #nosec G101 -- Not sensitive data
|
||||
newMnemonicPassphrasePromptText = "(Advanced) Setup a passphrase '25th word' for your mnemonic " +
|
||||
"(WARNING: You cannot recover your keys from your mnemonic if you forget this passphrase!)"
|
||||
// #nosec G101 -- Not sensitive data
|
||||
mnemonicPassphraseYesNoText = "(Advanced) Do you have an optional '25th word' passphrase for your mnemonic? [y/n]"
|
||||
// #nosec G101 -- Not sensitive data
|
||||
mnemonicPassphrasePromptText = "(Advanced) Enter the '25th word' passphrase for your mnemonic"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -38,89 +19,10 @@ var (
|
||||
ErrEmptyMnemonic = errors.New("phrase cannot be empty")
|
||||
)
|
||||
|
||||
// RecoverWalletConfig to run the recover wallet function.
|
||||
type RecoverWalletConfig struct {
|
||||
WalletDir string
|
||||
WalletPassword string
|
||||
Mnemonic string
|
||||
NumAccounts int
|
||||
Mnemonic25thWord string
|
||||
}
|
||||
|
||||
// RecoverWalletCli uses a mnemonic seed phrase to recover a wallet into the path provided. This
|
||||
// uses the CLI to extract necessary values to run the function.
|
||||
func RecoverWalletCli(cliCtx *cli.Context) error {
|
||||
mnemonic, err := inputMnemonic(cliCtx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get mnemonic phrase")
|
||||
}
|
||||
config := &RecoverWalletConfig{
|
||||
Mnemonic: mnemonic,
|
||||
}
|
||||
skipMnemonic25thWord := cliCtx.IsSet(flags.SkipMnemonic25thWordCheckFlag.Name)
|
||||
has25thWordFile := cliCtx.IsSet(flags.Mnemonic25thWordFileFlag.Name)
|
||||
if !skipMnemonic25thWord && !has25thWordFile {
|
||||
resp, err := prompt.ValidatePrompt(
|
||||
os.Stdin, mnemonicPassphraseYesNoText, prompt.ValidateYesOrNo,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate choice")
|
||||
}
|
||||
if strings.EqualFold(resp, "y") {
|
||||
mnemonicPassphrase, err := prompt.InputPassword(
|
||||
cliCtx,
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
mnemonicPassphrasePromptText,
|
||||
"Confirm mnemonic passphrase",
|
||||
false, /* Should confirm password */
|
||||
func(input string) error {
|
||||
if strings.TrimSpace(input) == "" {
|
||||
return errors.New("input cannot be empty")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Mnemonic25thWord = mnemonicPassphrase
|
||||
}
|
||||
}
|
||||
walletDir, err := userprompt.InputDirectory(cliCtx, userprompt.WalletDirPromptText, flags.WalletDirFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
walletPassword, err := prompt.InputPassword(
|
||||
cliCtx,
|
||||
flags.WalletPasswordFileFlag,
|
||||
wallet.NewWalletPasswordPromptText,
|
||||
wallet.ConfirmPasswordPromptText,
|
||||
true, /* Should confirm password */
|
||||
prompt.ValidatePasswordInput,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numAccounts, err := inputNumAccounts(cliCtx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get number of accounts to recover")
|
||||
}
|
||||
config.WalletDir = walletDir
|
||||
config.WalletPassword = walletPassword
|
||||
config.NumAccounts = int(numAccounts)
|
||||
if _, err = RecoverWallet(cliCtx.Context, config); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof(
|
||||
"Successfully recovered HD wallet with accounts and saved configuration to disk",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RecoverWallet uses a mnemonic seed phrase to recover a wallet into the path provided.
|
||||
func RecoverWallet(ctx context.Context, cfg *RecoverWalletConfig) (*wallet.Wallet, error) {
|
||||
// WalletRecover uses a mnemonic seed phrase to recover a wallet into the path provided.
|
||||
func (acm *AccountsCLIManager) WalletRecover(ctx context.Context) (*wallet.Wallet, error) {
|
||||
// Ensure that the wallet directory does not contain a wallet already
|
||||
dirExists, err := wallet.Exists(cfg.WalletDir)
|
||||
dirExists, err := wallet.Exists(acm.walletDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -129,9 +31,9 @@ func RecoverWallet(ctx context.Context, cfg *RecoverWalletConfig) (*wallet.Walle
|
||||
" alternative location for the new wallet or remove the current wallet")
|
||||
}
|
||||
w := wallet.New(&wallet.Config{
|
||||
WalletDir: cfg.WalletDir,
|
||||
WalletDir: acm.walletDir,
|
||||
KeymanagerKind: keymanager.Derived,
|
||||
WalletPassword: cfg.WalletPassword,
|
||||
WalletPassword: acm.walletPassword,
|
||||
})
|
||||
if err := w.SaveWallet(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save wallet to disk")
|
||||
@@ -143,91 +45,16 @@ func RecoverWallet(ctx context.Context, cfg *RecoverWalletConfig) (*wallet.Walle
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not make keymanager for given phrase")
|
||||
}
|
||||
if err := km.RecoverAccountsFromMnemonic(ctx, cfg.Mnemonic, cfg.Mnemonic25thWord, cfg.NumAccounts); err != nil {
|
||||
if err := km.RecoverAccountsFromMnemonic(ctx, acm.mnemonic, acm.mnemonic25thWord, acm.numAccounts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("wallet-path", w.AccountsDir()).Infof(
|
||||
"Successfully recovered HD wallet with %d accounts. Please use `accounts list` to view details for your accounts",
|
||||
cfg.NumAccounts,
|
||||
acm.numAccounts,
|
||||
)
|
||||
return w, nil
|
||||
}
|
||||
|
||||
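For context on the refactor above: the package-level RecoverWallet helper becomes a method on AccountsCLIManager, which is assembled from functional options. A minimal sketch of how a caller can drive the new API, using the option constructors that appear in the RPC handler further down in this diff (WithWalletDir, WithWalletPassword, WithMnemonic, WithMnemonic25thWord, WithNumAccounts); the wallet path and account count below are placeholder values and error handling is abbreviated.

    // Sketch only: recovering an HD wallet through the options-based manager.
    opts := []accounts.Option{
        accounts.WithWalletDir("/tmp/prysm-wallet"), // placeholder path
        accounts.WithWalletPassword(walletPassword),
        accounts.WithMnemonic(mnemonic),
        accounts.WithMnemonic25thWord(""), // assumed: empty string when no 25th word is used
        accounts.WithNumAccounts(1),
    }
    acm, err := accounts.NewCLIManager(opts...)
    if err != nil {
        return err
    }
    if _, err := acm.WalletRecover(ctx); err != nil {
        return err
    }
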
func inputMnemonic(cliCtx *cli.Context) (mnemonicPhrase string, err error) {
    if cliCtx.IsSet(flags.MnemonicFileFlag.Name) {
        mnemonicFilePath := cliCtx.String(flags.MnemonicFileFlag.Name)
        data, err := os.ReadFile(mnemonicFilePath) // #nosec G304 -- ReadFile is safe
        if err != nil {
            return "", err
        }
        enteredMnemonic := string(data)
        if err := ValidateMnemonic(enteredMnemonic); err != nil {
            return "", errors.Wrap(err, "mnemonic phrase did not pass validation")
        }
        return enteredMnemonic, nil
    }
    allowedLanguages := map[string][]string{
        "chinese_simplified":  wordlists.ChineseSimplified,
        "chinese_traditional": wordlists.ChineseTraditional,
        "czech":               wordlists.Czech,
        "english":             wordlists.English,
        "french":              wordlists.French,
        "japanese":            wordlists.Japanese,
        "korean":              wordlists.Korean,
        "italian":             wordlists.Italian,
        "spanish":             wordlists.Spanish,
    }
    languages := make([]string, 0)
    for k := range allowedLanguages {
        languages = append(languages, k)
    }
    sort.Strings(languages)
    selectedLanguage, err := prompt.ValidatePrompt(
        os.Stdin,
        fmt.Sprintf("Enter the language of your seed phrase: %s", strings.Join(languages, ", ")),
        func(input string) error {
            if _, ok := allowedLanguages[input]; !ok {
                return errors.New("input not in the list of allowed languages")
            }
            return nil
        },
    )
    if err != nil {
        return "", fmt.Errorf("could not get mnemonic language: %w", err)
    }
    bip39.SetWordList(allowedLanguages[selectedLanguage])
    mnemonicPhrase, err = prompt.ValidatePrompt(
        os.Stdin,
        "Enter the seed phrase for the wallet you would like to recover",
        ValidateMnemonic)
    if err != nil {
        return "", fmt.Errorf("could not get mnemonic phrase: %w", err)
    }
    return mnemonicPhrase, nil
}

func inputNumAccounts(cliCtx *cli.Context) (int64, error) {
    if cliCtx.IsSet(flags.NumAccountsFlag.Name) {
        numAccounts := cliCtx.Int64(flags.NumAccountsFlag.Name)
        if numAccounts <= 0 {
            return 0, errors.New("must recover at least 1 account")
        }
        return numAccounts, nil
    }
    numAccounts, err := prompt.ValidatePrompt(os.Stdin, "Enter how many accounts you would like to generate from the mnemonic", prompt.ValidateNumber)
    if err != nil {
        return 0, err
    }
    numAccountsInt, err := strconv.Atoi(numAccounts)
    if err != nil {
        return 0, err
    }
    if numAccountsInt <= 0 {
        return 0, errors.New("must recover at least 1 account")
    }
    return int64(numAccountsInt), nil
}

// ValidateMnemonic ensures that the mnemonic is not empty and that the word count is
// as specified (currently 24).
func ValidateMnemonic(mnemonic string) error {
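The body of ValidateMnemonic is cut off at this point in the diff. Purely as an illustration of the documented contract (non-empty, exactly 24 words), and not the actual Prysm implementation, a check of that shape could look like:

    // Illustrative sketch only; the real ValidateMnemonic may differ.
    func validateMnemonicSketch(mnemonic string) error {
        if strings.TrimSpace(mnemonic) == "" {
            return errors.New("phrase cannot be empty")
        }
        if got := len(strings.Fields(mnemonic)); got != 24 {
            return fmt.Errorf("expected 24 words, got %d", got)
        }
        return nil
    }
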
@@ -1,112 +1,11 @@
package accounts

import (
    "context"
    "flag"
    "os"
    "path/filepath"
    "strconv"
    "testing"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
    "github.com/prysmaticlabs/prysm/v3/testing/assert"
    "github.com/prysmaticlabs/prysm/v3/testing/require"
    "github.com/prysmaticlabs/prysm/v3/validator/accounts/iface"
    "github.com/prysmaticlabs/prysm/v3/validator/accounts/wallet"
    "github.com/prysmaticlabs/prysm/v3/validator/keymanager"
    "github.com/prysmaticlabs/prysm/v3/validator/keymanager/derived"
    "github.com/urfave/cli/v2"
)

type recoverCfgStruct struct {
    walletDir        string
    passwordFilePath string
    mnemonicFilePath string
    numAccounts      int64
}

func setupRecoverCfg(t *testing.T) *recoverCfgStruct {
    testDir := t.TempDir()
    walletDir := filepath.Join(testDir, walletDirName)
    passwordFilePath := filepath.Join(testDir, passwordFileName)
    require.NoError(t, os.WriteFile(passwordFilePath, []byte(password), os.ModePerm))
    mnemonicFilePath := filepath.Join(testDir, mnemonicFileName)
    require.NoError(t, os.WriteFile(mnemonicFilePath, []byte(mnemonic), os.ModePerm))

    return &recoverCfgStruct{
        walletDir:        walletDir,
        passwordFilePath: passwordFilePath,
        mnemonicFilePath: mnemonicFilePath,
    }
}

func createRecoverCliCtx(t *testing.T, cfg *recoverCfgStruct) *cli.Context {
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(flags.WalletDirFlag.Name, cfg.walletDir, "")
    set.String(flags.WalletPasswordFileFlag.Name, cfg.passwordFilePath, "")
    set.String(flags.KeymanagerKindFlag.Name, keymanager.Derived.String(), "")
    set.String(flags.MnemonicFileFlag.Name, cfg.mnemonicFilePath, "")
    set.Bool(flags.SkipMnemonic25thWordCheckFlag.Name, true, "")
    set.Int64(flags.NumAccountsFlag.Name, cfg.numAccounts, "")
    assert.NoError(t, set.Set(flags.SkipMnemonic25thWordCheckFlag.Name, "true"))
    assert.NoError(t, set.Set(flags.WalletDirFlag.Name, cfg.walletDir))
    assert.NoError(t, set.Set(flags.WalletPasswordFileFlag.Name, cfg.passwordFilePath))
    assert.NoError(t, set.Set(flags.KeymanagerKindFlag.Name, keymanager.Derived.String()))
    assert.NoError(t, set.Set(flags.MnemonicFileFlag.Name, cfg.mnemonicFilePath))
    assert.NoError(t, set.Set(flags.NumAccountsFlag.Name, strconv.Itoa(int(cfg.numAccounts))))
    return cli.NewContext(&app, set, nil)
}

func TestRecoverDerivedWallet(t *testing.T) {
    cfg := setupRecoverCfg(t)
    cfg.numAccounts = 4
    cliCtx := createRecoverCliCtx(t, cfg)
    require.NoError(t, RecoverWalletCli(cliCtx))

    ctx := context.Background()
    w, err := wallet.OpenWallet(cliCtx.Context, &wallet.Config{
        WalletDir:      cfg.walletDir,
        WalletPassword: password,
    })
    assert.NoError(t, err)

    km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
    require.NoError(t, err)
    derivedKM, ok := km.(*derived.Keymanager)
    if !ok {
        t.Fatal("not a derived keymanager")
    }
    names, err := derivedKM.ValidatingAccountNames(ctx)
    assert.NoError(t, err)
    require.Equal(t, len(names), int(cfg.numAccounts))
}

// TestRecoverDerivedWallet_OneAccount is a regression test for the case where the number of accounts recovered is 1.
func TestRecoverDerivedWallet_OneAccount(t *testing.T) {
    cfg := setupRecoverCfg(t)
    cfg.numAccounts = 1
    cliCtx := createRecoverCliCtx(t, cfg)
    require.NoError(t, RecoverWalletCli(cliCtx))

    _, err := wallet.OpenWallet(cliCtx.Context, &wallet.Config{
        WalletDir:      cfg.walletDir,
        WalletPassword: password,
    })
    assert.NoError(t, err)
}

func TestRecoverDerivedWallet_AlreadyExists(t *testing.T) {
    cfg := setupRecoverCfg(t)
    cfg.numAccounts = 4
    cliCtx := createRecoverCliCtx(t, cfg)
    require.NoError(t, RecoverWalletCli(cliCtx))

    // Trying to recover an HD wallet into a directory that already exists should return an error.
    require.ErrorContains(t, "a wallet already exists at this location", RecoverWalletCli(cliCtx))
}

func TestValidateMnemonic(t *testing.T) {
    tests := []struct {
        name string

@@ -937,17 +937,21 @@ func (v *validator) logDuties(slot types.Slot, duties []*ethpb.DutiesResponse_Du
        }
    }
    for i := types.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
        startTime := slots.StartTime(v.genesisTime, slotOffset+i)
        durationTillDuty := time.Until(startTime)

        if len(attesterKeys[i]) > 0 {
            log.WithFields(logrus.Fields{
                "slot":                  slotOffset + i,
                "slotInEpoch":           (slotOffset + i) % params.BeaconConfig().SlotsPerEpoch,
                "timeTillDuty":          durationTillDuty.Round(time.Second),
                "attesterDutiesAtSlot":  len(attesterKeys[i]),
                "totalAttestersInEpoch": totalAttestingKeys,
                "pubKeys":               attesterKeys[i],
            }).Info("Attestation schedule")
        }
        if proposerKeys[i] != "" {
            log.WithField("slot", slotOffset+i).WithField("pubKey", proposerKeys[i]).Info("Proposal schedule")
            log.WithField("slot", slotOffset+i).WithField("timeTillDuty", durationTillDuty.Round(time.Second)).WithField("pubKey", proposerKeys[i]).Info("Proposal schedule")
        }
    }
}
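The new timeTillDuty field is simply the wall-clock countdown to the slot's start time. A rough sketch of the arithmetic, assuming the usual genesis-plus-slot-duration layout (the actual helper used above is slots.StartTime from Prysm's time/slots package):

    // Sketch: approximate duty countdown for a slot, given the genesis Unix timestamp.
    secondsPerSlot := params.BeaconConfig().SecondsPerSlot
    slotStart := time.Unix(int64(genesisTime), 0).Add(time.Duration(uint64(slot)*secondsPerSlot) * time.Second)
    timeTillDuty := time.Until(slotStart).Round(time.Second) // e.g. "2m36s" in the log fields above
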
@@ -1066,7 +1070,7 @@ func (v *validator) buildSignedRegReqs(ctx context.Context, pubkeys [][fieldpara
        feeRecipient = v.ProposerSettings.DefaultConfig.FeeRecipient // Use cli config for fee recipient.
        config := v.ProposerSettings.DefaultConfig.BuilderConfig
        if config != nil && config.Enabled {
            gasLimit = config.GasLimit // Use cli config for gas limit.
            gasLimit = uint64(config.GasLimit) // Use cli config for gas limit.
            enabled = true
        }
    }
@@ -1077,7 +1081,7 @@ func (v *validator) buildSignedRegReqs(ctx context.Context, pubkeys [][fieldpara
        builderConfig := config.BuilderConfig
        if builderConfig != nil {
            if builderConfig.Enabled {
                gasLimit = builderConfig.GasLimit // Use file config for gas limit.
                gasLimit = uint64(builderConfig.GasLimit) // Use file config for gas limit.
                enabled = true
            } else {
                enabled = false // Custom config can disable validator from register.

@@ -1514,7 +1514,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
        FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455043BFEBf6177F1D2e9738D9"),
        BuilderConfig: &validatorserviceconfig.BuilderConfig{
            Enabled:  true,
            GasLimit: uint64(40000000),
            GasLimit: 40000000,
        },
    }
    v.ProposerSettings = &validatorserviceconfig.ProposerSettings{
@@ -1523,7 +1523,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress(defaultFeeHex),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: uint64(35000000),
                GasLimit: 35000000,
            },
        },
    }
@@ -1541,11 +1541,11 @@ func TestValidator_PushProposerSettings(t *testing.T) {

        {
            FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455043BFEBf6177F1D2e9738D9").Bytes(),
            GasLimit:     uint64(40000000),
            GasLimit:     40000000,
        },
        {
            FeeRecipient: byteValueAddress,
            GasLimit:     uint64(35000000),
            GasLimit:     35000000,
        },
    },
},
@@ -1594,7 +1594,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
        FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455043BFEBf6177F1D2e9738D9"),
        BuilderConfig: &validatorserviceconfig.BuilderConfig{
            Enabled:  true,
            GasLimit: uint64(40000000),
            GasLimit: 40000000,
        },
    }
    v.ProposerSettings = &validatorserviceconfig.ProposerSettings{
@@ -1603,7 +1603,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress(defaultFeeHex),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  false,
                GasLimit: uint64(35000000),
                GasLimit: 35000000,
            },
        },
    }
@@ -1715,7 +1715,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress(defaultFeeHex),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                GasLimit: validatorserviceconfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
            },
        },
    }
@@ -1771,7 +1771,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress(defaultFeeHex),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: uint64(40000000),
                GasLimit: 40000000,
            },
        },
    }
@@ -1963,7 +1963,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
        FeeRecipient: common.Address{},
        BuilderConfig: &validatorserviceconfig.BuilderConfig{
            Enabled:  true,
            GasLimit: uint64(40000000),
            GasLimit: 40000000,
        },
    }
    v.ProposerSettings = &validatorserviceconfig.ProposerSettings{
@@ -1972,7 +1972,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress(defaultFeeHex),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: uint64(40000000),
                GasLimit: 40000000,
            },
        },
    }

@@ -13,6 +13,7 @@ import (
    "os"
    "os/signal"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "syscall"
@@ -498,9 +499,17 @@ func proposerSettings(cliCtx *cli.Context) (*validatorServiceConfig.ProposerSett
    suggestedFee := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
    var vr *validatorServiceConfig.BuilderConfig
    if cliCtx.Bool(flags.EnableBuilderFlag.Name) {
        sgl := cliCtx.String(flags.BuilderGasLimitFlag.Name)
        vr = &validatorServiceConfig.BuilderConfig{
            Enabled:  true,
            GasLimit: reviewGasLimit(uint64(cliCtx.Int(flags.BuilderGasLimitFlag.Name))),
            GasLimit: validatorServiceConfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
        }
        if sgl != "" {
            gl, err := strconv.ParseUint(sgl, 10, 64)
            if err != nil {
                return nil, errors.New("Gas Limit is not a uint64")
            }
            vr.GasLimit = reviewGasLimit(validatorServiceConfig.Uint64(gl))
        }
    }
    fileConfig = &validatorServiceConfig.ProposerSettingsPayload{
@@ -595,10 +604,10 @@ func warnNonChecksummedAddress(feeRecipient string) error {
    return nil
}

func reviewGasLimit(gasLimit uint64) uint64 {
func reviewGasLimit(gasLimit validatorServiceConfig.Uint64) validatorServiceConfig.Uint64 {
    // sets gas limit to default if not defined or set to 0
    if gasLimit == 0 {
        return params.BeaconConfig().DefaultBuilderGasLimit
        return validatorServiceConfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit)
    }
    // TODO(10810): add in warning for ranges
    return gasLimit

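The validatorServiceConfig.Uint64 type that replaces plain uint64 throughout this change is what lets gas_limit be supplied either as a JSON/YAML number or as a quoted string (see the testdata updates further down). The concrete Prysm definition is not shown in this diff; as an assumption-labelled sketch, a string-or-number type of that kind can be built on json.Unmarshaler roughly like this (encoding/json and strconv assumed imported):

    // Sketch only: a uint64 that accepts both 40000000 and "40000000" in JSON.
    // The real validatorServiceConfig.Uint64 may be implemented differently.
    type Uint64 uint64

    func (u *Uint64) UnmarshalJSON(data []byte) error {
        if len(data) > 0 && data[0] == '"' { // quoted form, e.g. "30000000"
            s, err := strconv.Unquote(string(data))
            if err != nil {
                return err
            }
            v, err := strconv.ParseUint(s, 10, 64)
            if err != nil {
                return err
            }
            *u = Uint64(v)
            return nil
        }
        var v uint64 // bare numeric form, e.g. 30000000
        if err := json.Unmarshal(data, &v); err != nil {
            return err
        }
        *u = Uint64(v)
        return nil
    }
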
@@ -211,6 +211,7 @@ func TestProposerSettings(t *testing.T) {
        dir        string
        url        string
        defaultfee string
        defaultgas string
    }

    type args struct {
@@ -271,14 +272,14 @@ func TestProposerSettings(t *testing.T) {
                FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
                BuilderConfig: &validatorserviceconfig.BuilderConfig{
                    Enabled:  true,
                    GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                    GasLimit: validatorserviceconfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
                },
            },
            bytesutil.ToBytes48(key2): {
                FeeRecipient: common.HexToAddress("0x60155530FCE8a85ec7055A5F8b2bE214B3DaeFd4"),
                BuilderConfig: &validatorserviceconfig.BuilderConfig{
                    Enabled:  true,
                    GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                    GasLimit: validatorserviceconfig.Uint64(35000000),
                },
            },
        },
@@ -286,7 +287,7 @@ func TestProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                GasLimit: validatorserviceconfig.Uint64(40000000),
            },
        },
    }
@@ -336,7 +337,7 @@ func TestProposerSettings(t *testing.T) {
                FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
                BuilderConfig: &validatorserviceconfig.BuilderConfig{
                    Enabled:  true,
                    GasLimit: uint64(40000000),
                    GasLimit: 40000000,
                },
            },
        },
@@ -344,7 +345,7 @@ func TestProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  false,
                GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                GasLimit: validatorserviceconfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
            },
        },
    }
@@ -386,7 +387,32 @@ func TestProposerSettings(t *testing.T) {
            FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
            BuilderConfig: &validatorserviceconfig.BuilderConfig{
                Enabled:  true,
                GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
                GasLimit: validatorserviceconfig.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
            },
        },
    }
},
wantErr: "",
validatorRegistrationEnabled: true,
},
{
    name: "Happy Path Suggested Fee , validator registration enabled and default gas",
    args: args{
        proposerSettingsFlagValues: &proposerSettingsFlag{
            dir:        "",
            url:        "",
            defaultfee: "0x6e35733c5af9B61374A128e6F85f553aF09ff89A",
            defaultgas: "50000000",
        },
    },
    want: func() *validatorserviceconfig.ProposerSettings {
        return &validatorserviceconfig.ProposerSettings{
            ProposeConfig: nil,
            DefaultConfig: &validatorserviceconfig.ProposerOption{
                FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
                BuilderConfig: &validatorserviceconfig.BuilderConfig{
                    Enabled:  true,
                    GasLimit: 50000000,
                },
            },
        }
@@ -487,6 +513,20 @@ func TestProposerSettings(t *testing.T) {
    },
    wantErr: "cannot specify both",
},
{
    name: "Bad Gas value in JSON",
    args: args{
        proposerSettingsFlagValues: &proposerSettingsFlag{
            dir:        "./testdata/bad-gas-value-proposer-settings.json",
            url:        "",
            defaultfee: "",
        },
    },
    want: func() *validatorserviceconfig.ProposerSettings {
        return nil
    },
    wantErr: "failed to unmarshal yaml file",
},
}
for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
@@ -514,6 +554,10 @@ func TestProposerSettings(t *testing.T) {
        set.String(flags.SuggestedFeeRecipientFlag.Name, tt.args.proposerSettingsFlagValues.defaultfee, "")
        require.NoError(t, set.Set(flags.SuggestedFeeRecipientFlag.Name, tt.args.proposerSettingsFlagValues.defaultfee))
    }
    if tt.args.proposerSettingsFlagValues.defaultgas != "" {
        set.String(flags.BuilderGasLimitFlag.Name, tt.args.proposerSettingsFlagValues.defaultgas, "")
        require.NoError(t, set.Set(flags.BuilderGasLimitFlag.Name, tt.args.proposerSettingsFlagValues.defaultgas))
    }
    if tt.validatorRegistrationEnabled {
        set.Bool(flags.EnableBuilderFlag.Name, true, "")
    }

19
validator/node/testdata/bad-gas-value-proposer-settings.json
vendored
Normal file
@@ -0,0 +1,19 @@
{
  "proposer_config": {
    "0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a": {
      "fee_recipient": "0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3",
      "builder": {
        "enabled": true,
        "gas_limit": "asdfsffsdf"
      }
    }
  },
  "default_config": {
    "fee_recipient": "0x6e35733c5af9B61374A128e6F85f553aF09ff89A",
    "builder": {
      "enabled": true,
      "gas_limit": 40000000
    }
  }
}

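This fixture exercises the failure path: "asdfsffsdf" cannot be parsed as a uint64, so loading the file is expected to fail with the "failed to unmarshal yaml file" error asserted in the test table above. A hypothetical reproduction against the sketch type shown earlier (struct shape and field tags are illustrative only):

    // Hypothetical: decoding the bad gas_limit value fails inside Uint64.UnmarshalJSON.
    var builder struct {
        GasLimit Uint64 `json:"gas_limit"`
    }
    err := json.Unmarshal([]byte(`{"gas_limit": "asdfsffsdf"}`), &builder)
    // err != nil: strconv.ParseUint rejects "asdfsffsdf".
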
@@ -4,14 +4,14 @@
      "fee_recipient": "0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3",
      "builder": {
        "enabled": true,
        "gas_limit": 30000000
        "gas_limit": "30000000"
      }
    },
    "0xb057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7b": {
      "fee_recipient": "0x60155530FCE8a85ec7055A5F8b2bE214B3DaeFd4",
      "builder": {
        "enabled": true,
        "gas_limit": 30000000
        "gas_limit": 35000000
      }
    }
  },
@@ -19,7 +19,7 @@
    "fee_recipient": "0x6e35733c5af9B61374A128e6F85f553aF09ff89A",
    "builder": {
      "enabled": true,
      "gas_limit": 30000000
      "gas_limit": 40000000
    }
  }
}

@@ -9,4 +9,4 @@ default_config:
  fee_recipient: '0x6e35733c5af9B61374A128e6F85f553aF09ff89A'
  builder:
    enabled: false
    gas_limit: 30000000
    gas_limit: '30000000'
@@ -406,11 +406,11 @@ func (s *Server) GetGasLimit(_ context.Context, req *ethpbservice.PubkeyRequest)
    proposerOption, found := s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)]
    if found {
        if proposerOption.BuilderConfig != nil {
            resp.Data.GasLimit = proposerOption.BuilderConfig.GasLimit
            resp.Data.GasLimit = uint64(proposerOption.BuilderConfig.GasLimit)
            return resp, nil
        }
    } else if s.validatorService.ProposerSettings.DefaultConfig != nil && s.validatorService.ProposerSettings.DefaultConfig.BuilderConfig != nil {
        resp.Data.GasLimit = s.validatorService.ProposerSettings.DefaultConfig.BuilderConfig.GasLimit
        resp.Data.GasLimit = uint64(s.validatorService.ProposerSettings.DefaultConfig.BuilderConfig.GasLimit)
        return resp, nil
    }
}
@@ -437,10 +437,10 @@ func (s *Server) SetGasLimit(ctx context.Context, req *ethpbservice.SetGasLimitR
        // "DefaultConfig.BuilderConfig".
        bo := *s.validatorService.ProposerSettings.DefaultConfig.BuilderConfig
        pBuilderConfig = &bo
        pBuilderConfig.GasLimit = req.GasLimit
        pBuilderConfig.GasLimit = validatorServiceConfig.Uint64(req.GasLimit)
    } else {
        // No default BuilderConfig to copy from, so create one and set "GasLimit", but keep "Enabled" set to false.
        pBuilderConfig = &validatorServiceConfig.BuilderConfig{Enabled: false, GasLimit: req.GasLimit}
        pBuilderConfig = &validatorServiceConfig.BuilderConfig{Enabled: false, GasLimit: validatorServiceConfig.Uint64(req.GasLimit)}
    }

    pOption := validatorServiceConfig.DefaultProposerOption()
@@ -463,7 +463,7 @@ func (s *Server) SetGasLimit(ctx context.Context, req *ethpbservice.SetGasLimitR
        if proposerOption.BuilderConfig == nil {
            proposerOption.BuilderConfig = pBuilderConfig
        } else {
            proposerOption.BuilderConfig.GasLimit = req.GasLimit
            proposerOption.BuilderConfig.GasLimit = validatorServiceConfig.Uint64(req.GasLimit)
        }
    } else {
        s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)] = &pOption

@@ -1007,7 +1007,7 @@ func TestServer_SetGasLimit(t *testing.T) {
    _, err = s.SetGasLimit(ctx, &ethpbservice.SetGasLimitRequest{Pubkey: tt.pubkey, GasLimit: tt.newGasLimit})
    require.NoError(t, err)
    for _, w := range tt.w {
        assert.Equal(t, w.gaslimit, s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(w.pubkey)].BuilderConfig.GasLimit)
        assert.Equal(t, w.gaslimit, uint64(s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(w.pubkey)].BuilderConfig.GasLimit))
    }
})
}
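One detail worth noting in the SetGasLimit change above: the handler copies the shared DefaultConfig.BuilderConfig by value before overriding the gas limit, so the per-key override cannot mutate the default settings. The pattern, reduced to a sketch (variable names here are illustrative, not the handler's):

    // Copy-then-modify: dereference to copy the struct value, then edit the copy.
    bo := *defaultBuilderConfig                               // value copy, not an alias
    bo.GasLimit = validatorServiceConfig.Uint64(req.GasLimit) // override applies to the copy only
    perKeyOption.BuilderConfig = &bo                          // shared default remains untouched
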
@@ -187,13 +187,18 @@ func (s *Server) RecoverWallet(ctx context.Context, req *pb.RecoverWalletRequest
        return nil, status.Error(codes.InvalidArgument, "password did not pass validation")
    }

    if _, err := accounts.RecoverWallet(ctx, &accounts.RecoverWalletConfig{
        WalletDir:        walletDir,
        WalletPassword:   walletPassword,
        Mnemonic:         mnemonic,
        NumAccounts:      numAccounts,
        Mnemonic25thWord: req.Mnemonic25ThWord,
    }); err != nil {
    opts := []accounts.Option{
        accounts.WithWalletDir(walletDir),
        accounts.WithWalletPassword(walletPassword),
        accounts.WithMnemonic(mnemonic),
        accounts.WithMnemonic25thWord(req.Mnemonic25ThWord),
        accounts.WithNumAccounts(numAccounts),
    }
    acc, err := accounts.NewCLIManager(opts...)
    if err != nil {
        return nil, err
    }
    if _, err := acc.WalletRecover(ctx); err != nil {
        return nil, err
    }
    if err := s.initializeWallet(ctx, &wallet.Config{