Mirror of https://github.com/OffchainLabs/prysm.git

Compare commits: methodical...moveCoreMe (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | c18e22e861 |  |
|  | 6f8ff002f3 |  |
.gitignore (vendored), 3 changes

@@ -35,6 +35,3 @@ bin

# p2p metaData
metaData

# execution API authentication
jwt.hex
@@ -2,8 +2,7 @@

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[](https://github.com/ethereum/consensus-specs/tree/v1.1.10)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
@@ -29,7 +29,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -11,7 +11,7 @@ import (
"testing"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/config/params"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

@@ -98,7 +98,7 @@ func TestClient_RegisterValidator(t *testing.T) {
}
reg := &eth.SignedValidatorRegistrationV1{
Message: &eth.ValidatorRegistrationV1{
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
FeeRecipient: ezDecode(t, fieldparams.EthBurnAddressHex),
GasLimit: 23,
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
@@ -337,7 +337,7 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)

@@ -349,19 +349,10 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}

//
// A <- B <- C
// \ \
// \ ---------- E
// ---------- D

func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"

@@ -269,10 +270,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
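For readers skimming the hunk above: the only functional change is which package supplies the burn-address constant. A minimal, self-contained sketch of the check-and-warn pattern it implements is below; the constant value and the helper name are illustrative assumptions, not code taken from the Prysm packages.

```go
package main

import "github.com/sirupsen/logrus"

// ethBurnAddressHex stands in for the EthBurnAddressHex constant referenced in
// the diff; the literal value here is an assumption for illustration.
const ethBurnAddressHex = "0x0000000000000000000000000000000000000000"

// warnIfBurnAddress mirrors the pattern shown above: if the configured fee
// recipient equals the burn address, warn so the operator can change it.
func warnIfBurnAddress(validatorIndex uint64, feeRecipient string) {
	if feeRecipient == ethBurnAddressHex {
		logrus.WithFields(logrus.Fields{
			"validatorIndex": validatorIndex,
			"burnAddress":    ethBurnAddressHex,
		}).Warn("Fee recipient is currently using the burn address, " +
			"you will not be rewarded transaction fees on this setting")
	}
}

func main() {
	warnIfBurnAddress(42, ethBurnAddressHex)
}
```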
@@ -806,7 +806,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

// Cache hit, advance state, has fee recipient
@@ -263,8 +263,8 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
processedDepositsCount.Set(float64(postState.Eth1DepositIndex() + 1))

var b *precompute.Balance
var v []*precompute.Validator
var b *types.Balance
var v []*types.Validator
var err error
switch headState.Version() {
case version.Phase0:
@@ -459,12 +459,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Prune forkchoice store only if the new finalized checkpoint is higher
// than the finalized checkpoint in forkchoice store.
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedEpoch() {
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
// Prune forkchoice store
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}

// Set their optimistic status
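The two variants of this hunk differ only in whether pruning is gated on the batch's finalized checkpoint being newer than what the fork-choice store already considers finalized. A simplified sketch of that gating, with a hypothetical prune callback standing in for ForkChoiceStore.Prune:

```go
package main

import (
	"context"
	"fmt"
)

// pruneIfNewerFinalized mirrors the conditional variant of the hunk above:
// prune the fork-choice store only when the batch's finalized checkpoint epoch
// is higher than the epoch the store already has. The prune callback is a
// stand-in for the real ForkChoiceStore.Prune method.
func pruneIfNewerFinalized(
	ctx context.Context,
	batchFinalizedEpoch, storeFinalizedEpoch uint64,
	finalizedRoot [32]byte,
	prune func(context.Context, [32]byte) error,
) error {
	if batchFinalizedEpoch <= storeFinalizedEpoch {
		return nil // nothing newer finalized; skip pruning
	}
	if err := prune(ctx, finalizedRoot); err != nil {
		return fmt.Errorf("could not prune fork choice nodes: %w", err)
	}
	return nil
}

func main() {
	err := pruneIfNewerFinalized(context.Background(), 3, 2, [32]byte{'a'},
		func(context.Context, [32]byte) error { return nil })
	fmt.Println("prune attempted, err =", err)
}
```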
@@ -343,69 +343,6 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedEpoch())
}

func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)

service.cfg.ForkChoiceStore = protoarray.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)

st, keys := util.DeterministicGenesisState(t, 64)

bState := st.Copy()

var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 128; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
if i == 32 {
firstState = bState.Copy()
}
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}

for i := 0; i < 32; i++ {
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[i]))
}
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'b'})
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[31], firstState))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, firstState, blkRoots[31]))
err = service.onBlockBatch(ctx, blks[32:], blkRoots[32:])
require.NoError(t, err)
}

func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -206,7 +206,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
s.store = store.New(justified, finalized)

var forkChoicer f.ForkChoicer
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
fRoot := bytesutil.ToBytes32(finalized.Root)
if features.Get().EnableForkChoiceDoublyLinkedTree {
forkChoicer = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
} else {
@@ -11,7 +11,6 @@ go_library(
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
@@ -10,7 +10,6 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"

@@ -40,7 +39,7 @@ type ChainService struct {
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
Balance *precompute.Balance
Balance *types.Balance
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data

@@ -62,7 +61,6 @@ type ChainService struct {
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
}

// ForkChoicer mocks the same method in the chain service

@@ -339,7 +337,7 @@ func (s *ChainService) CurrentSlot() types.Slot {
}

// Participation mocks the same method in the chain service.
func (s *ChainService) Participation(_ uint64) *precompute.Balance {
func (s *ChainService) Participation(_ uint64) *types.Balance {
return s.Balance
}

@@ -448,8 +446,7 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
}

// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
return s.Optimistic, nil
}
@@ -3,8 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"error.go",
"metric.go",
"option.go",
"service.go",
],

@@ -12,19 +10,13 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -1,7 +0,0 @@
package builder

import "github.com/pkg/errors"

var (
ErrNotRunning = errors.New("builder is not running")
)
@@ -1,37 +0,0 @@
package builder

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
submitBlindedBlockLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "submit_blinded_block_latency_milliseconds",
Help: "Captures RPC latency for submitting blinded block in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
getHeaderLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_header_latency_milliseconds",
Help: "Captures RPC latency for get header in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
getStatusLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_status_latency_milliseconds",
Help: "Captures RPC latency for get status in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
registerValidatorLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "register_validator_latency_milliseconds",
Help: "Captures RPC latency for register validator in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
)
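The deleted metric.go wires up Prometheus histograms through promauto, and the service methods in the next file observe them from a deferred closure. For reference, a stand-alone sketch of that same measure-on-defer pattern; the metric name and buckets below are placeholders rather than Prysm's.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleLatency follows the same promauto pattern as the removed file; the
// metric name and buckets here are illustrative, not Prysm's.
var exampleLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_rpc_latency_milliseconds",
	Help:    "Captures RPC latency in milliseconds",
	Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
})

// timedCall records how long fn took, using the defer pattern the old
// builder service methods used before they were stubbed out.
func timedCall(fn func() error) error {
	start := time.Now()
	defer func() {
		exampleLatency.Observe(float64(time.Since(start).Milliseconds()))
	}()
	return fn()
}

func main() {
	_ = timedCall(func() error {
		time.Sleep(5 * time.Millisecond)
		return nil
	})
}
```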
@@ -1,7 +1,6 @@
package builder

import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"

@@ -27,14 +26,6 @@ func WithBuilderEndpoints(endpoint string) Option {
}
}

// WithDatabase sets the database for the beacon chain builder service.
func WithDatabase(database db.HeadAccessDatabase) Option {
return func(s *Service) error {
s.cfg.beaconDB = database
return nil
}
}

func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
@@ -2,33 +2,26 @@ package builder

import (
"context"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/client/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/network"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)

// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
Status(ctx context.Context) error
Status() error
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
}

// config defines a config struct for dependencies into the service.
type config struct {
builderEndpoint network.Endpoint
beaconDB db.HeadAccessDatabase
headFetcher blockchain.HeadFetcher
}

// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.

@@ -63,59 +56,22 @@ func (*Service) Stop() error {
return nil
}

// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
defer func() {
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

return s.c.SubmitBlindedBlock(ctx, b)
// SubmitBlindedBlock is currently a stub.
func (*Service) SubmitBlindedBlock(context.Context, *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
return nil, errors.New("not implemented")
}

// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error) {
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
defer span.End()
start := time.Now()
defer func() {
getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

return s.c.GetHeader(ctx, slot, parentHash, pubKey)
// GetHeader is currently a stub.
func (*Service) GetHeader(context.Context, types.Slot, [32]byte, [48]byte) (*ethpb.SignedBuilderBid, error) {
return nil, errors.New("not implemented")
}

// Status retrieves the status of the builder relay network.
func (s *Service) Status(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "builder.Status")
defer span.End()
start := time.Now()
defer func() {
getStatusLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

return s.c.Status(ctx)
// Status is currently a stub.
func (*Service) Status() error {
return errors.New("not implemented")
}

// RegisterValidator registers a validator with the builder relay network.
// It also saves the registration object to the DB.
func (s *Service) RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.RegisterValidator")
defer span.End()
start := time.Now()
defer func() {
registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

idx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(reg.Message.Pubkey))
if !exists {
return nil // If the pubkey is not found, it is not a validator. Do nothing.
}
if err := s.c.RegisterValidator(ctx, reg); err != nil {
return errors.Wrap(err, "could not register validator")
}

return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{idx}, []*ethpb.ValidatorRegistrationV1{reg.Message})
// RegisterValidator is currently a stub.
func (*Service) RegisterValidator(context.Context, *ethpb.SignedValidatorRegistrationV1) error {
return errors.New("not implemented")
}
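To make the shape of the change easier to see, here is a free-standing sketch of a stub that satisfies a BlockBuilder-like interface the way the reverted Service methods do. The types are simplified placeholders for Prysm's protobuf types so that the sketch compiles on its own; it is not the repository's code.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Simplified placeholders for the protobuf types referenced by the interface
// in the diff; the real definitions live in Prysm's proto packages.
type (
	SignedBlindedBeaconBlockBellatrix struct{}
	ExecutionPayload                  struct{}
	SignedBuilderBid                  struct{}
	SignedValidatorRegistrationV1     struct{}
	Slot                              uint64
)

// BlockBuilder mirrors the shape of the interface in the hunk above.
type BlockBuilder interface {
	SubmitBlindedBlock(ctx context.Context, b *SignedBlindedBeaconBlockBellatrix) (*ExecutionPayload, error)
	GetHeader(ctx context.Context, slot Slot, parentHash [32]byte, pubKey [48]byte) (*SignedBuilderBid, error)
	Status() error
	RegisterValidator(ctx context.Context, reg *SignedValidatorRegistrationV1) error
}

// stubService returns "not implemented" from every method, which is the
// behaviour the diff gives the builder Service after this change.
type stubService struct{}

func (*stubService) SubmitBlindedBlock(context.Context, *SignedBlindedBeaconBlockBellatrix) (*ExecutionPayload, error) {
	return nil, errors.New("not implemented")
}

func (*stubService) GetHeader(context.Context, Slot, [32]byte, [48]byte) (*SignedBuilderBid, error) {
	return nil, errors.New("not implemented")
}

func (*stubService) Status() error { return errors.New("not implemented") }

func (*stubService) RegisterValidator(context.Context, *SignedValidatorRegistrationV1) error {
	return errors.New("not implemented")
}

func main() {
	var b BlockBuilder = &stubService{}
	fmt.Println(b.Status()) // prints: not implemented
}
```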
@@ -66,7 +66,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
@@ -9,16 +9,17 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)

// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*types.Validator, *types.Balance, error) {
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
defer span.End()
vals := make([]*precompute.Validator, beaconState.NumValidators())
bal := &precompute.Balance{}
vals := make([]*types.Validator, beaconState.NumValidators())
bal := &types.Balance{}
prevEpoch := time.PrevEpoch(beaconState)
currentEpoch := time.CurrentEpoch(beaconState)
inactivityScores, err := beaconState.InactivityScores()

@@ -33,7 +34,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
}
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
// Set validator's balance, inactivity score and slashed/withdrawable status.
v := &precompute.Validator{
v := &types.Validator{
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
InactivityScore: inactivityScores[idx],
IsSlashed: val.Slashed(),

@@ -74,57 +75,18 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
func ProcessInactivityScores(
ctx context.Context,
beaconState state.BeaconState,
vals []*precompute.Validator,
) (state.BeaconState, []*precompute.Validator, error) {
vals []*types.Validator,
) (state.BeaconState, []*types.Validator, error) {
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
defer span.End()

cfg := params.BeaconConfig()
if time.CurrentEpoch(beaconState) == cfg.GenesisEpoch {
return beaconState, vals, nil
}

inactivityScores, err := beaconState.InactivityScores()
currEpoch := time.CurrentEpoch(beaconState)
prevEpoch := time.PrevEpoch(beaconState)
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
vals, err := beaconState.ProcessInactivityScores(ctx, currEpoch, prevEpoch, finalizedEpoch, vals)
if err != nil {
return nil, nil, err
}

bias := cfg.InactivityScoreBias
recoveryRate := cfg.InactivityScoreRecoveryRate
prevEpoch := time.PrevEpoch(beaconState)
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
for i, v := range vals {
if !precompute.EligibleForRewards(v) {
continue
}

if v.IsPrevEpochTargetAttester && !v.IsSlashed {
// Decrease inactivity score when validator gets target correct.
if v.InactivityScore > 0 {
v.InactivityScore -= 1
}
} else {
v.InactivityScore, err = math.Add64(v.InactivityScore, bias)
if err != nil {
return nil, nil, err
}
}

if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
}

if err := beaconState.SetInactivityScores(inactivityScores); err != nil {
return nil, nil, err
}

return beaconState, vals, nil
}
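The loop shown in ProcessInactivityScores encodes a per-validator rule: decrement the score (floored at zero) when the validator hit the previous-epoch target and is not slashed, otherwise add the configured bias, and subtract the recovery rate whenever the chain is not in an inactivity leak. A self-contained sketch of that rule, with the overflow guard simplified:

```go
package main

import "fmt"

// updateInactivityScore applies the per-validator rule from the hunk above.
// bias and recoveryRate correspond to InactivityScoreBias and
// InactivityScoreRecoveryRate in the beacon config; the values used in main
// are examples only.
func updateInactivityScore(score uint64, hitPrevTarget, slashed, inLeak bool, bias, recoveryRate uint64) uint64 {
	if hitPrevTarget && !slashed {
		// Decrease inactivity score when the validator gets the target correct.
		if score > 0 {
			score--
		}
	} else {
		score += bias // the real code uses math.Add64 to guard against overflow
	}
	if !inLeak {
		// Prevent underflow below zero, as in the original loop.
		dec := recoveryRate
		if dec > score {
			dec = score
		}
		score -= dec
	}
	return score
}

func main() {
	fmt.Println(updateInactivityScore(5, true, false, false, 4, 16)) // 0: decremented, then recovered
	fmt.Println(updateInactivityScore(5, false, false, true, 4, 16)) // 9: penalized during a leak
}
```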
@@ -141,9 +103,9 @@ func ProcessInactivityScores(
func ProcessEpochParticipation(
ctx context.Context,
beaconState state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]*precompute.Validator, *precompute.Balance, error) {
bal *types.Balance,
vals []*types.Validator,
) ([]*types.Validator, *types.Balance, error) {
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
defer span.End()

@@ -209,8 +171,8 @@ func ProcessEpochParticipation(
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
beaconState state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
bal *types.Balance,
vals []*types.Validator,
) (state.BeaconState, error) {
// Don't process rewards and penalties in genesis epoch.
cfg := params.BeaconConfig()

@@ -253,7 +215,7 @@ func ProcessRewardsAndPenaltiesPrecompute(

// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
func AttestationsDelta(beaconState state.BeaconState, bal *types.Balance, vals []*types.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)

@@ -285,8 +247,8 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
}

func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
bal *types.Balance,
val *types.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
@@ -5,7 +5,6 @@ import (
"math"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"

@@ -35,29 +34,29 @@ func TestInitializeEpochValidators_Ok(t *testing.T) {
require.NoError(t, err)
v, b, err := InitializePrecomputeValidators(context.Background(), s)
require.NoError(t, err)
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsSlashed: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 0,
}, v[0], "Incorrect validator 0 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 1,
}, v[1], "Incorrect validator 1 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsActivePrevEpoch: true,
IsActiveCurrentEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 2,
}, v[2], "Incorrect validator 2 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsActivePrevEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 3,
}, v[3], "Incorrect validator 3 status")

wantedBalances := &precompute.Balance{
wantedBalances := &types.Balance{
ActiveCurrentEpoch: 100,
ActivePrevEpoch: 200,
}

@@ -96,13 +95,13 @@ func TestProcessEpochParticipation(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}, validators[0])
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,

@@ -111,7 +110,7 @@ func TestProcessEpochParticipation(t *testing.T) {
IsPrevEpochAttester: true,
IsPrevEpochSourceAttester: true,
}, validators[1])
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,

@@ -122,7 +121,7 @@ func TestProcessEpochParticipation(t *testing.T) {
IsCurrentEpochTargetAttester: true,
IsPrevEpochTargetAttester: true,
}, validators[2])
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,

@@ -174,13 +173,13 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), st, balance, validators)
require.NoError(t, err)
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: false,
IsActivePrevEpoch: false,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}, validators[0])
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: false,
IsActivePrevEpoch: true,
IsPrevEpochAttester: true,

@@ -189,7 +188,7 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}, validators[1])
require.DeepEqual(t, &precompute.Validator{
require.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,

@@ -413,7 +412,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
require.NoError(t, err)
_, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*types.Validator{})
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
}
@@ -8,7 +8,6 @@ go_library(
"new.go",
"reward_penalty.go",
"slashing.go",
"type.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute",
visibility = [
@@ -22,13 +22,13 @@ import (
func ProcessAttestations(
ctx context.Context,
state state.ReadOnlyBeaconState,
vp []*Validator,
pBal *Balance,
) ([]*Validator, *Balance, error) {
vp []*types.Validator,
pBal *types.Balance,
) ([]*types.Validator, *types.Balance, error) {
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.ProcessAttestations")
defer span.End()

v := &Validator{}
v := &types.Validator{}
var err error

prevAtt, err := state.PreviousEpochAttestations()

@@ -141,7 +141,7 @@ func SameHead(state state.ReadOnlyBeaconState, a *ethpb.PendingAttestation) (boo
}

// UpdateValidator updates pre computed validator store.
func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *ethpb.PendingAttestation, aSlot types.Slot) []*Validator {
func UpdateValidator(vp []*types.Validator, record *types.Validator, indices []uint64, a *ethpb.PendingAttestation, aSlot types.Slot) []*types.Validator {
inclusionSlot := aSlot + a.InclusionDelay

for _, i := range indices {

@@ -171,7 +171,7 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *et
}

// UpdateBalance updates pre computed balance store.
func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {
func UpdateBalance(vp []*types.Validator, bBal *types.Balance, stateVersion int) *types.Balance {
for _, v := range vp {
if !v.IsSlashed {
if v.IsCurrentEpochAttester {

@@ -200,7 +200,7 @@ func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {

// EnsureBalancesLowerBound ensures all the balances such as active current epoch, active previous epoch and more
// have EffectiveBalanceIncrement(1 eth) as a lower bound.
func EnsureBalancesLowerBound(bBal *Balance) *Balance {
func EnsureBalancesLowerBound(bBal *types.Balance) *types.Balance {
ebi := params.BeaconConfig().EffectiveBalanceIncrement
if ebi > bBal.ActiveCurrentEpoch {
bBal.ActiveCurrentEpoch = ebi
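EnsureBalancesLowerBound, shown truncated above, clamps each aggregate balance to at least one effective-balance increment (1 ETH), presumably so later balance arithmetic never operates on zero. A tiny sketch of that clamp; the increment literal below is an example, not the configured constant:

```go
package main

import "fmt"

// clampToIncrement returns at least one effective-balance increment, mirroring
// the lower-bound logic of EnsureBalancesLowerBound in the hunk above.
func clampToIncrement(balance, increment uint64) uint64 {
	if increment > balance {
		return increment
	}
	return balance
}

func main() {
	const increment = 1_000_000_000 // example: 1 ETH expressed in Gwei
	fmt.Println(clampToIncrement(0, increment))              // clamped up to the increment
	fmt.Println(clampToIncrement(32_000_000_000, increment)) // left unchanged
}
```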
@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/runtime/version"

@@ -18,36 +19,36 @@ import (

func TestUpdateValidator_Works(t *testing.T) {
e := params.BeaconConfig().FarFutureSlot
vp := []*precompute.Validator{{}, {InclusionSlot: e}, {}, {InclusionSlot: e}, {}, {InclusionSlot: e}}
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
vp := []*types.Validator{{}, {InclusionSlot: e}, {}, {InclusionSlot: e}, {}, {InclusionSlot: e}}
record := &types.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
IsPrevEpochAttester: true, IsPrevEpochTargetAttester: true, IsPrevEpochHeadAttester: true}
a := &ethpb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}

// Indices 1 3 and 5 attested
vp = precompute.UpdateValidator(vp, record, []uint64{1, 3, 5}, a, 100)

wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
wanted := &types.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
IsPrevEpochAttester: true, IsPrevEpochTargetAttester: true, IsPrevEpochHeadAttester: true,
ProposerIndex: 2, InclusionDistance: 1, InclusionSlot: 101}
wantedVp := []*precompute.Validator{{}, wanted, {}, wanted, {}, wanted}
wantedVp := []*types.Validator{{}, wanted, {}, wanted, {}, wanted}
assert.DeepEqual(t, wantedVp, vp, "Incorrect attesting validator calculations")
}

func TestUpdateValidator_InclusionOnlyCountsPrevEpoch(t *testing.T) {
e := params.BeaconConfig().FarFutureSlot
vp := []*precompute.Validator{{InclusionSlot: e}}
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true}
vp := []*types.Validator{{InclusionSlot: e}}
record := &types.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true}
a := &ethpb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}

// Verify inclusion info doesnt get updated.
vp = precompute.UpdateValidator(vp, record, []uint64{0}, a, 100)
wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
wantedVp := []*precompute.Validator{wanted}
wanted := &types.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
wantedVp := []*types.Validator{wanted}
assert.DeepEqual(t, wantedVp, vp, "Incorrect attesting validator calculations")
}

func TestUpdateBalance(t *testing.T) {
vp := []*precompute.Validator{
vp := []*types.Validator{
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsCurrentEpochTargetAttester: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsCurrentEpochTargetAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},

@@ -57,7 +58,7 @@ func TestUpdateBalance(t *testing.T) {
{IsPrevEpochAttester: true, IsPrevEpochHeadAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsSlashed: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
}
wantedPBal := &precompute.Balance{
wantedPBal := &types.Balance{
ActiveCurrentEpoch: params.BeaconConfig().EffectiveBalanceIncrement,
ActivePrevEpoch: params.BeaconConfig().EffectiveBalanceIncrement,
CurrentEpochAttested: 200 * params.BeaconConfig().EffectiveBalanceIncrement,

@@ -66,12 +67,12 @@ func TestUpdateBalance(t *testing.T) {
PrevEpochTargetAttested: 100 * params.BeaconConfig().EffectiveBalanceIncrement,
PrevEpochHeadAttested: 200 * params.BeaconConfig().EffectiveBalanceIncrement,
}
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Phase0)
pBal := precompute.UpdateBalance(vp, &types.Balance{}, version.Phase0)
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
}

func TestUpdateBalanceBellatrixVersion(t *testing.T) {
vp := []*precompute.Validator{
vp := []*types.Validator{
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsCurrentEpochTargetAttester: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsCurrentEpochTargetAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},

@@ -81,7 +82,7 @@ func TestUpdateBalanceBellatrixVersion(t *testing.T) {
{IsPrevEpochAttester: true, IsPrevEpochHeadAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsSlashed: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
}
wantedPBal := &precompute.Balance{
wantedPBal := &types.Balance{
ActiveCurrentEpoch: params.BeaconConfig().EffectiveBalanceIncrement,
ActivePrevEpoch: params.BeaconConfig().EffectiveBalanceIncrement,
CurrentEpochAttested: 200 * params.BeaconConfig().EffectiveBalanceIncrement,

@@ -90,7 +91,7 @@ func TestUpdateBalanceBellatrixVersion(t *testing.T) {
PrevEpochTargetAttested: 100 * params.BeaconConfig().EffectiveBalanceIncrement,
PrevEpochHeadAttested: 200 * params.BeaconConfig().EffectiveBalanceIncrement,
}
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Bellatrix)
pBal := precompute.UpdateBalance(vp, &types.Balance{}, version.Bellatrix)
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
}

@@ -199,11 +200,11 @@ func TestProcessAttestations(t *testing.T) {
err = beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{Data: att2.Data, AggregationBits: bf, InclusionDelay: 1})
require.NoError(t, err)

pVals := make([]*precompute.Validator, validators)
pVals := make([]*types.Validator, validators)
for i := 0; i < len(pVals); i++ {
pVals[i] = &precompute.Validator{CurrentEpochEffectiveBalance: 100}
pVals[i] = &types.Validator{CurrentEpochEffectiveBalance: 100}
}
pVals, _, err = precompute.ProcessAttestations(context.Background(), beaconState, pVals, &precompute.Balance{})
pVals, _, err = precompute.ProcessAttestations(context.Background(), beaconState, pVals, &types.Balance{})
require.NoError(t, err)

committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)

@@ -227,7 +228,7 @@ func TestProcessAttestations(t *testing.T) {
}

func TestEnsureBalancesLowerBound(t *testing.T) {
b := &precompute.Balance{}
b := &types.Balance{}
b = precompute.EnsureBalancesLowerBound(b)
balanceIncrement := params.BeaconConfig().EffectiveBalanceIncrement
assert.Equal(t, balanceIncrement, b.ActiveCurrentEpoch, "Did not get wanted active current balance")
@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)

@@ -44,7 +45,7 @@ func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Chec
// previous_target_balance = get_attesting_balance(state, previous_attestations)
// current_target_balance = get_attesting_balance(state, current_attestations)
// weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal *Balance) (state.BeaconState, error) {
func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal *types.Balance) (state.BeaconState, error) {
canProcessSlot, err := slots.EpochStart(2 /*epoch*/)
if err != nil {
return nil, err
@@ -43,7 +43,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
state, err := v1.InitializeFromProto(base)
require.NoError(t, err)
attestedBalance := 4 * uint64(e) * 3 / 2
b := &precompute.Balance{PrevEpochTargetAttested: attestedBalance}
b := &types.Balance{PrevEpochTargetAttested: attestedBalance}
newState, err := precompute.ProcessJustificationAndFinalizationPreCompute(state, b)
require.NoError(t, err)
rt := [32]byte{byte(64)}

@@ -80,7 +80,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
state, err := v1.InitializeFromProto(base)
require.NoError(t, err)
attestedBalance := 4 * uint64(e) * 3 / 2
b := &precompute.Balance{PrevEpochTargetAttested: attestedBalance}
b := &types.Balance{PrevEpochTargetAttested: attestedBalance}
newState, err := precompute.ProcessJustificationAndFinalizationPreCompute(state, b)
require.NoError(t, err)
rt := [32]byte{byte(64)}

@@ -116,7 +116,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
state, err := v1.InitializeFromProto(base)
require.NoError(t, err)
attestedBalance := 4 * uint64(e) * 3 / 2
b := &precompute.Balance{PrevEpochTargetAttested: attestedBalance}
b := &types.Balance{PrevEpochTargetAttested: attestedBalance}
newState, err := precompute.ProcessJustificationAndFinalizationPreCompute(state, b)
require.NoError(t, err)
rt := [32]byte{byte(64)}
@@ -11,18 +11,19 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"go.opencensus.io/trace"
)

// New gets called at the beginning of process epoch cycle to return
// pre computed instances of validators attesting records and total
// balances attested in an epoch.
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
func New(ctx context.Context, s state.BeaconState) ([]*types.Validator, *types.Balance, error) {
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
defer span.End()

pValidators := make([]*Validator, s.NumValidators())
pBal := &Balance{}
pValidators := make([]*types.Validator, s.NumValidators())
pBal := &types.Balance{}

currentEpoch := time.CurrentEpoch(s)
prevEpoch := time.PrevEpoch(s)

@@ -30,7 +31,7 @@ func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, erro
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
// Was validator withdrawable or slashed
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
pVal := &Validator{
pVal := &types.Validator{
IsSlashed: val.Slashed(),
IsWithdrawableCurrentEpoch: withdrawable,
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"

@@ -31,33 +32,33 @@ func TestNew(t *testing.T) {
e := params.BeaconConfig().FarFutureSlot
v, b, err := precompute.New(context.Background(), s)
require.NoError(t, err)
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsSlashed: true,
CurrentEpochEffectiveBalance: 100,
InclusionDistance: e,
InclusionSlot: e,
}, v[0], "Incorrect validator 0 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: 100,
InclusionDistance: e,
InclusionSlot: e,
}, v[1], "Incorrect validator 1 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
CurrentEpochEffectiveBalance: 100,
InclusionDistance: e,
InclusionSlot: e,
}, v[2], "Incorrect validator 2 status")
assert.DeepEqual(t, &precompute.Validator{
assert.DeepEqual(t, &types.Validator{
IsActivePrevEpoch: true,
CurrentEpochEffectiveBalance: 100,
InclusionDistance: e,
InclusionSlot: e,
}, v[3], "Incorrect validator 3 status")

wantedBalances := &precompute.Balance{
wantedBalances := &types.Balance{
ActiveCurrentEpoch: 100,
ActivePrevEpoch: 200,
}
@@ -10,15 +10,15 @@ import (
"github.com/prysmaticlabs/prysm/math"
)

type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, []uint64, error)
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, error)
type attesterRewardsFunc func(state.ReadOnlyBeaconState, *types.Balance, []*types.Validator) ([]uint64, []uint64, error)
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *types.Balance, []*types.Validator) ([]uint64, error)

// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
state state.BeaconState,
pBal *Balance,
vp []*Validator,
pBal *types.Balance,
vp []*types.Validator,
attRewardsFunc attesterRewardsFunc,
proRewardsFunc proposerRewardsFunc,
) (state.BeaconState, error) {

@@ -65,7 +65,7 @@ func ProcessRewardsAndPenaltiesPrecompute(

// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Validator) ([]uint64, []uint64, error) {
func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *types.Balance, vp []*types.Validator) ([]uint64, []uint64, error) {
numOfVals := state.NumValidators()
rewards := make([]uint64, numOfVals)
penalties := make([]uint64, numOfVals)

@@ -79,7 +79,7 @@ func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Val
return rewards, penalties, nil
}

func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator, prevEpoch, finalizedEpoch types.Epoch) (uint64, uint64) {
func attestationDelta(pBal *types.Balance, sqrtActiveCurrentEpoch uint64, v *types.Validator, prevEpoch, finalizedEpoch types.Epoch) (uint64, uint64) {
if !EligibleForRewards(v) || pBal.ActiveCurrentEpoch == 0 {
return 0, 0
}

@@ -156,7 +156,7 @@ func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator

// ProposersDelta computes and returns the rewards and penalties differences for individual validators based on the
// proposer inclusion records.
func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Validator) ([]uint64, error) {
func ProposersDelta(state state.ReadOnlyBeaconState, pBal *types.Balance, vp []*types.Validator) ([]uint64, error) {
numofVals := state.NumValidators()
rewards := make([]uint64, numofVals)

@@ -190,6 +190,6 @@ func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Valida
//
// Spec code:
// if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
func EligibleForRewards(v *Validator) bool {
func EligibleForRewards(v *types.Validator) bool {
return v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
}
@@ -307,8 +307,8 @@ func TestProposerDeltaPrecompute_HappyCase(t *testing.T) {
require.NoError(t, err)

proposerIndex := types.ValidatorIndex(1)
b := &Balance{ActiveCurrentEpoch: 1000}
v := []*Validator{
b := &types.Balance{ActiveCurrentEpoch: 1000}
v := []*types.Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
}
r, err := ProposersDelta(beaconState, b, v)

@@ -329,8 +329,8 @@ func TestProposerDeltaPrecompute_ValidatorIndexOutOfRange(t *testing.T) {
require.NoError(t, err)

proposerIndex := types.ValidatorIndex(validatorCount)
b := &Balance{ActiveCurrentEpoch: 1000}
v := []*Validator{
b := &types.Balance{ActiveCurrentEpoch: 1000}
v := []*types.Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
}
_, err = ProposersDelta(beaconState, b, v)

@@ -345,8 +345,8 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
require.NoError(t, err)

proposerIndex := types.ValidatorIndex(1)
b := &Balance{ActiveCurrentEpoch: 1000}
v := []*Validator{
b := &types.Balance{ActiveCurrentEpoch: 1000}
v := []*types.Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex, IsSlashed: true},
}
r, err := ProposersDelta(beaconState, b, v)
@@ -12,7 +12,7 @@ import (

// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.
// This is an optimized version by passing in precomputed total epoch balances.
func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
func ProcessSlashingsPrecompute(s state.BeaconState, pBal *types.Balance) error {
currentEpoch := time.CurrentEpoch(s)
exitLength := params.BeaconConfig().EpochsPerSlashingsVector
@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"

@@ -21,7 +22,7 @@ func TestProcessSlashingsPrecompute_NotSlashedWithSlashedTrue(t *testing.T) {
Slashings: []uint64{0, 1e9},
})
require.NoError(t, err)
pBal := &precompute.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
pBal := &types.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
require.NoError(t, precompute.ProcessSlashingsPrecompute(s, pBal))

wanted := params.BeaconConfig().MaxEffectiveBalance

@@ -36,7 +37,7 @@ func TestProcessSlashingsPrecompute_NotSlashedWithSlashedFalse(t *testing.T) {
Slashings: []uint64{0, 1e9},
})
require.NoError(t, err)
pBal := &precompute.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
pBal := &types.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
require.NoError(t, precompute.ProcessSlashingsPrecompute(s, pBal))

wanted := params.BeaconConfig().MaxEffectiveBalance

@@ -120,7 +121,7 @@ func TestProcessSlashingsPrecompute_SlashedLess(t *testing.T) {
}
ab += b
}
pBal := &precompute.Balance{ActiveCurrentEpoch: ab}
pBal := &types.Balance{ActiveCurrentEpoch: ab}

original := proto.Clone(tt.state)
state, err := v1.InitializeFromProto(tt.state)
@@ -23,14 +23,14 @@ type ReadOnlyDatabase interface {
Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.SignedBeaconBlock, [][32]byte, error)
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error)
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (types.Slot, [][32]byte, error)
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
@@ -86,7 +86,6 @@ type NoHeadAccessDatabase interface {
RunMigrations(ctx context.Context) error
// Fee reicipients operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error

CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}
@@ -185,19 +185,17 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
}

// BlocksBySlot retrieves a list of beacon blocks and its respective roots by slot.
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
defer span.End()

blocks := make([]interfaces.SignedBeaconBlock, 0)

err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
roots, err := blockRootsBySlot(ctx, tx, slot)
if err != nil {
return errors.Wrap(err, "could not retrieve blocks by slot")
}
for _, r := range roots {
encoded := bkt.Get(r[:])

keys := blockRootsBySlot(ctx, tx, slot)
for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i])
blk, err := unmarshalBlock(ctx, encoded)
if err != nil {
return err
@@ -206,7 +204,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces
}
return nil
})
return blocks, err
return len(blocks) > 0, blocks, err
}
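The hunk above changes BlocksBySlot to return an explicit found flag along with the blocks. A hedged caller-side sketch of consuming the new shape (the db handle, ctx, slot and sentinel error are placeholders, error handling trimmed):

// Sketch only, not part of the diff above.
has, blks, err := db.BlocksBySlot(ctx, slot)
if err != nil {
	return err
}
if !has {
	return errNoBlockAtSlot // hypothetical sentinel for the empty case
}
for _, blk := range blks {
	_ = blk // each block stored at this slot, e.g. competing forks
}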
// BlockRootsBySlot retrieves a list of beacon block roots by slot
@@ -215,9 +213,11 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
defer span.End()
blockRoots := make([][32]byte, 0)
err := s.db.View(func(tx *bolt.Tx) error {
var err error
blockRoots, err = blockRootsBySlot(ctx, tx, slot)
return err
keys := blockRootsBySlot(ctx, tx, slot)
for i := 0; i < len(keys); i++ {
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
}
return nil
})
if err != nil {
return false, nil, errors.Wrap(err, "could not retrieve block roots by slot")
@@ -398,17 +398,14 @@ func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) e
})
}

// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance
// checking which root is canonical in fork choice, which operates purely on roots,
// then if no canonical block is found, continuing to search through lower slots).
func (s *Store) HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (fs types.Slot, roots [][32]byte, err error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestRootsBelowSlot")
// HighestSlotBlocksBelow returns the block with the highest slot below the input slot from the db.
func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
defer span.End()

var root [32]byte
sk := bytesutil.Uint64ToBytesBigEndian(uint64(slot))
err = s.db.View(func(tx *bolt.Tx) error {
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blockSlotIndicesBucket)
c := bkt.Cursor()
// The documentation for Seek says:
@@ -433,28 +430,34 @@ func (s *Store) HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (fs
if r == nil {
continue
}
fs = bytesutil.BytesToSlotBigEndian(sl)
bs := bytesutil.BytesToSlotBigEndian(sl)
// Iterating through the index using .Prev will move from higher to lower, so the first key we find behind
// the requested slot must be the highest block below that slot.
if slot > fs {
roots, err = splitRoots(r)
if err != nil {
return errors.Wrapf(err, "error parsing packed roots %#x", r)
}
return nil
if slot > bs {
root = bytesutil.ToBytes32(r)
break
}
}
return nil
})
if err != nil {
return 0, nil, err
}
if len(roots) == 0 || (len(roots) == 1 && roots[0] == params.BeaconConfig().ZeroHash) {
gr, err := s.GenesisBlockRoot(ctx)
return 0, [][32]byte{gr}, err
return nil, err
}

return fs, roots, nil
var blk interfaces.SignedBeaconBlock
if root != params.BeaconConfig().ZeroHash {
blk, err = s.Block(ctx, root)
if err != nil {
return nil, err
}
}
if blk == nil || blk.IsNil() {
blk, err = s.GenesisBlock(ctx)
if err != nil {
return nil, err
}
}
return []interfaces.SignedBeaconBlock{blk}, nil
}
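The doc comment on the HighestRootsBelowSlot side of the hunk above spells out the intended search pattern: operate on roots only, and use the returned slot to keep descending when nothing canonical is found. A sketch of that loop under those assumptions (isCanonical is a hypothetical helper standing in for a fork-choice check, error handling trimmed):

// Sketch only, not part of the diff above.
slot := startSlot
for slot > 0 {
	foundSlot, roots, err := db.HighestRootsBelowSlot(ctx, slot)
	if err != nil {
		return err
	}
	for _, r := range roots {
		if isCanonical(r) { // assumed helper backed by fork choice
			blk, err := db.Block(ctx, r) // resolve the block only once a canonical root is known
			_, _ = blk, err
			return nil
		}
	}
	slot = foundSlot // nothing canonical at foundSlot, keep searching below it
}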
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
@@ -678,22 +681,21 @@ func blockRootsBySlotRange(
}

// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
defer span.End()

roots := make([][]byte, 0)
bkt := tx.Bucket(blockSlotIndicesBucket)
key := bytesutil.SlotToBytesBigEndian(slot)
c := bkt.Cursor()
k, v := c.Seek(key)
if k != nil && bytes.Equal(k, key) {
r, err := splitRoots(v)
if err != nil {
return nil, errors.Wrapf(err, "corrupt value in block slot index for slot=%d", slot)
for i := 0; i < len(v); i += 32 {
roots = append(roots, v[i:i+32])
}
return r, nil
}
return [][32]byte{}, nil
return roots
}

// createBlockIndicesFromBlock takes in a beacon block and returns
@@ -517,32 +517,18 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, block2))
require.NoError(t, db.SaveBlock(ctx, block3))

_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
require.NoError(t, err)
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
require.Equal(t, 1, len(roots))
root := roots[0]
b, err := db.Block(ctx, root)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 11)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block2.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block2, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
require.NoError(t, err)
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
require.Equal(t, 1, len(roots))
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
require.NoError(t, err)
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
require.Equal(t, 1, len(roots))
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block3.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block3, highestAt[0])
})
}
}
@@ -563,29 +549,15 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, block1))

_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
require.NoError(t, err)
require.Equal(t, 1, len(roots))
root := roots[0]
b, err := db.Block(ctx, root)
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 1)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 0)
require.NoError(t, err)
require.Equal(t, 1, len(roots))
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
require.NoError(t, err)
require.Equal(t, 1, len(roots))
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
})
}
}
@@ -666,21 +638,22 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
r3, err := b3.Block().HashTreeRoot()
require.NoError(t, err)

retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
hasBlocks, retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
assert.Equal(t, false, hasBlocks, "Expected no blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
require.NoError(t, err)
if len(retrievedBlocks) != 2 {
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
}
assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
assert.Equal(t, true, hasBlocks, "Expected to have blocks")

hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
require.NoError(t, err)
@@ -4,8 +4,6 @@ import (
"bytes"
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -101,16 +99,3 @@ func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byt
}
return nil
}

var errMisalignedRootList = errors.New("incorrectly packed root list, length is not a multiple of 32")

func splitRoots(b []byte) ([][32]byte, error) {
rl := make([][32]byte, 0)
if len(b)%32 != 0 {
return nil, errors.Wrapf(errMisalignedRootList, "root list len=%d", len(b))
}
for s, f := 0, 32; f <= len(b); s, f = f, f+32 {
rl = append(rl, bytesutil.ToBytes32(b[s:f]))
}
return rl, nil
}
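splitRoots above unpacks the 32-byte-aligned values stored in the block slot index; misaligned input surfaces errMisalignedRootList. A small sketch of the round trip, with the packing done inline (compare testPack in the test hunk further down):

// Sketch only, not part of the diff above.
a := bytesutil.ToBytes32([]byte{1})
b := bytesutil.ToBytes32([]byte{2})
packed := append(a[:], b[:]...) // 64 bytes, a multiple of 32
roots, err := splitRoots(packed)
if err != nil {
	return err // only possible when len(packed)%32 != 0
}
_ = roots // roots[0] == a, roots[1] == b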
@@ -138,60 +138,3 @@ func Test_deleteValueForIndices(t *testing.T) {
})
}
}

func testPack(bs [][32]byte) []byte {
r := make([]byte, 0)
for _, b := range bs {
r = append(r, b[:]...)
}
return r
}

func TestSplitRoots(t *testing.T) {
bt := make([][32]byte, 0)
for _, x := range []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {
var b [32]byte
for i := 0; i < 32; i++ {
b[i] = x
}
bt = append(bt, b)
}
cases := []struct {
name string
b []byte
expect [][32]byte
err error
}{
{
name: "misaligned",
b: make([]byte, 61),
err: errMisalignedRootList,
},
{
name: "happy",
b: testPack(bt[0:5]),
expect: bt[0:5],
},
{
name: "single",
b: testPack([][32]byte{bt[0]}),
expect: [][32]byte{bt[0]},
},
{
name: "empty",
b: []byte{},
expect: [][32]byte{},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
r, err := splitRoots(c.b)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.DeepEqual(t, c.expect, r)
})
}
}
@@ -435,8 +435,6 @@ func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32
if n1.slot > n2.slot {
n1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if n1 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}

@@ -535,17 +535,9 @@ func TestStore_CommonAncestor(t *testing.T) {
require.ErrorIs(t, err, ErrNilNode)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, ErrNilNode)
n := &Node{
slot: 100,
root: [32]byte{'y'},
justifiedEpoch: 1,
unrealizedJustifiedEpoch: 1,
finalizedEpoch: 1,
unrealizedFinalizedEpoch: 1,
optimistic: true,
}

f.store.nodeByRoot[[32]byte{'y'}] = n
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)

@@ -125,20 +125,19 @@ func (s *Store) insert(ctx context.Context,

s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
} else {
return errInvalidParentRoot
}
} else {
if parent != nil {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
return err
}
}

// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
}

// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
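The nil-parent check added in the CommonAncestorRoot hunk above guards the usual two-pointer ancestor walk. The idea, sketched over a deliberately simplified node type (this is not the Prysm store implementation, just the shape of the algorithm):

// Sketch only, not part of the diff above: step the deeper node toward the
// root until the pointers meet or a link is missing.
type node struct {
	slot   uint64
	parent *node
}

func commonAncestor(n1, n2 *node) (*node, error) {
	for n1 != nil && n2 != nil {
		if n1 == n2 {
			return n1, nil
		}
		if n1.slot > n2.slot {
			n1 = n1.parent
		} else {
			n2 = n2.parent
		}
	}
	return nil, errors.New("unknown common ancestor") // a broken link was reached
}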
@@ -10,21 +10,21 @@ var (
prometheus.HistogramOpts{
Name: "new_payload_v1_latency_milliseconds",
Help: "Captures RPC latency for newPayloadV1 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
getPayloadLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_payload_v1_latency_milliseconds",
Help: "Captures RPC latency for getPayloadV1 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
forkchoiceUpdatedLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "forkchoice_updated_v1_latency_milliseconds",
Help: "Captures RPC latency for forkchoiceUpdatedV1 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
)
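The bucket change above trades the 2s-4s headroom for finer resolution in the 1-20 ms range. A hedged sketch of feeding one of these histograms; the newPayloadLatency variable name and the RPC call are assumptions for illustration:

// Sketch only, not part of the diff above.
start := time.Now()
err := callNewPayloadV1(ctx, payload) // placeholder for the actual engine RPC
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
if err != nil {
	return err
}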
@@ -70,14 +70,10 @@ func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*eth
if err != nil {
return nil, status.Errorf(codes.Internal, "could not get weak subjectivity slot: %v", err)
}
cbr, err := bs.CanonicalHistory.BlockRootForSlot(ctx, wsSlot)
cbr, cb, err := bs.CanonicalHistory.BlockForSlot(ctx, wsSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("could not find highest block below slot %d", wsSlot))
}
cb, err := bs.BeaconDB.Block(ctx, cbr)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("block with root %#x from slot index %d not found in db", cbr, wsSlot))
}
stateRoot := bytesutil.ToBytes32(cb.Block().StateRoot())
log.Printf("weak subjectivity checkpoint reported as epoch=%d, block root=%#x, state root=%#x", wsEpoch, cbr, stateRoot)
return &ethpbv1.WeakSubjectivityResponse{
@@ -154,7 +150,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
if req.Slot != nil {
slot = *req.Slot
}
blks, err = bs.BeaconDB.BlocksBySlot(ctx, slot)
_, blks, err = bs.BeaconDB.BlocksBySlot(ctx, slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", req.Slot, err)
}
@@ -758,7 +754,7 @@ func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfa
e := newBlockIdParseError(err)
return nil, &e
}
blks, err := bs.BeaconDB.BlocksBySlot(ctx, types.Slot(slot))
_, blks, err := bs.BeaconDB.BlocksBySlot(ctx, types.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve blocks for slot %d", slot)
}

@@ -1527,8 +1527,8 @@ func TestServer_GetBlockSSZ(t *testing.T) {
},
}

blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blocks) > 0)
ok, blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, ok)
require.NoError(t, err)
sszBlock, err := blocks[0].MarshalSSZ()
require.NoError(t, err)
@@ -1567,8 +1567,8 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
},
}

blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blocks) > 0)
ok, blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, ok)
require.NoError(t, err)
sszBlock, err := blocks[0].MarshalSSZ()
require.NoError(t, err)
@@ -1606,8 +1606,8 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
},
}

blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blocks) > 0)
ok, blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, ok)
require.NoError(t, err)
sszBlock, err := blocks[0].MarshalSSZ()
require.NoError(t, err)
@@ -1645,8 +1645,8 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
},
}

blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blocks) > 0)
ok, blocks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, ok)
require.NoError(t, err)
sszBlock, err := blocks[0].MarshalSSZ()
require.NoError(t, err)
@@ -1,7 +1,6 @@
package helpers

import (
"bytes"
"context"
"strconv"

@@ -10,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/config/params"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -42,15 +40,12 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo

// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
header := st.LatestBlockHeader()
// This happens when the block at the state's slot is not missing.
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
}
header.StateRoot = root[:]
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
}
header := st.LatestBlockHeader()
header.StateRoot = root[:]
headRoot, err := header.HashTreeRoot()
if err != nil {
return false, errors.Wrap(err, "could not get header root")

@@ -69,29 +69,4 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("zero state root", func(t *testing.T) {
zeroRootSt, err := util.NewBeaconState()
require.NoError(t, err)
h := zeroRootSt.LatestBlockHeader()
h.StateRoot = make([]byte, 32)
require.NoError(t, zeroRootSt.SetLatestBlockHeader(h))
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
})
t.Run("non-zero state root", func(t *testing.T) {
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
})
}
@@ -209,11 +209,11 @@ func (bs *Server) listBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequ

// listBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) listBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) {
blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
}
if len(blks) == 0 {
if !hasBlocks {
return []blockContainer{}, 0, strconv.Itoa(0), nil
}

@@ -393,10 +393,6 @@ func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, err
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head block")
}
optimisticStatus, err := bs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get optimistic status")
}
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
return nil, status.Errorf(codes.NotFound, "Head block of chain was nil: %v", err)
}
@@ -478,6 +474,5 @@ func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, err
PreviousJustifiedSlot: pjSlot,
PreviousJustifiedEpoch: prevJustifiedCheckpoint.Epoch,
PreviousJustifiedBlockRoot: prevJustifiedCheckpoint.Root,
OptimisticStatus: optimisticStatus,
}, nil
}

@@ -426,7 +426,6 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err = bs.GetChainHead(context.Background(), nil)
require.ErrorContains(t, "Could not get genesis block", err)
@@ -462,7 +461,6 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}

_, err = bs.GetChainHead(context.Background(), nil)
@@ -471,8 +469,7 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {

func TestServer_GetChainHead_NoHeadBlock(t *testing.T) {
bs := &Server{
HeadFetcher: &chainMock.ChainService{Block: nil},
OptimisticModeFetcher: &chainMock.ChainService{},
HeadFetcher: &chainMock.ChainService{Block: nil},
}
_, err := bs.GetChainHead(context.Background(), nil)
assert.ErrorContains(t, "Head block of chain was nil", err)
@@ -534,9 +531,8 @@ func TestServer_GetChainHead(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bs := &Server{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
OptimisticModeFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizationFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
@@ -554,7 +550,6 @@ func TestServer_GetChainHead(t *testing.T) {
assert.DeepEqual(t, pjRoot[:], head.PreviousJustifiedBlockRoot, "Unexpected PreviousJustifiedBlockRoot")
assert.DeepEqual(t, jRoot[:], head.JustifiedBlockRoot, "Unexpected JustifiedBlockRoot")
assert.DeepEqual(t, fRoot[:], head.FinalizedBlockRoot, "Unexpected FinalizedBlockRoot")
assert.Equal(t, false, head.OptimisticStatus)
}

func TestServer_StreamChainHead_ContextCanceled(t *testing.T) {
@@ -650,7 +645,6 @@ func TestServer_StreamChainHead_OnHeadUpdated(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)

@@ -47,5 +47,4 @@ type Server struct {
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
@@ -516,8 +516,8 @@ func (bs *Server) GetValidatorParticipation(
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", endSlot, err))
}
var v []*precompute.Validator
var b *precompute.Balance
var v []*types.Validator
var b *types.Balance
switch beaconState.Version() {
case version.Phase0:
v, b, err = precompute.New(ctx, beaconState)
@@ -680,7 +680,7 @@ func (bs *Server) GetValidatorPerformance(
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", currSlot, err)
}
}
var validatorSummary []*precompute.Validator
var validatorSummary []*types.Validator
switch headState.Version() {
case version.Phase0:
vp, bp, err := precompute.New(ctx, headState)
@@ -862,8 +862,8 @@ func (bs *Server) GetIndividualVotes(
return filteredIndices[i] < filteredIndices[j]
})

var v []*precompute.Validator
var bal *precompute.Balance
var v []*types.Validator
var bal *types.Balance
switch st.Version() {
case version.Phase0:
v, bal, err = precompute.New(ctx, st)

@@ -127,10 +127,10 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
case errors.As(err, kv.ErrNotFoundFeeRecipient):
// If fee recipient is not found in DB and not set from beacon node CLI,
// use the burn address.
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +

@@ -268,7 +268,6 @@ func (s *Service) Start() {
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
HeadUpdater: s.cfg.HeadUpdater,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
CanonicalFetcher: s.cfg.CanonicalFetcher,

@@ -292,11 +292,11 @@ func (p *StateProvider) stateRootBySlot(ctx context.Context, slot types.Slot) ([
if slot > currentSlot {
return nil, errors.New("slot cannot be in the future")
}
blks, err := p.BeaconDB.BlocksBySlot(ctx, slot)
found, blks, err := p.BeaconDB.BlocksBySlot(ctx, slot)
if err != nil {
return nil, errors.Wrap(err, "could not get blocks")
}
if len(blks) == 0 {
if !found {
return nil, errors.New("no block exists")
}
if len(blks) != 1 {
15
beacon-chain/state/core/BUILD.bazel
Normal file
15
beacon-chain/state/core/BUILD.bazel
Normal file
@@ -0,0 +1,15 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["inactivity_scores.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
],
)
57
beacon-chain/state/core/inactivity_scores.go
Normal file
57
beacon-chain/state/core/inactivity_scores.go
Normal file
@@ -0,0 +1,57 @@
package core

import (
"context"

"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/math"
)

func ProcessInactivityScores(ctx context.Context,
inactivityScores []uint64,
currentEpoch, previousEpoch, finalizedEpoch types.Epoch,
vals []*types.Validator,
) ([]uint64, []*types.Validator, error) {

cfg := params.BeaconConfig()
if currentEpoch == cfg.GenesisEpoch {
return inactivityScores, vals, nil
}

bias := cfg.InactivityScoreBias
recoveryRate := cfg.InactivityScoreRecoveryRate

var err error
for i, v := range vals {
if !precompute.EligibleForRewards(v) {
continue
}

if v.IsPrevEpochTargetAttester && !v.IsSlashed {
// Decrease inactivity score when validator gets target correct.
if v.InactivityScore > 0 {
v.InactivityScore -= 1
}
} else {
v.InactivityScore, err = math.Add64(v.InactivityScore, bias)
if err != nil {
return nil, nil, err
}
}

if !helpers.IsInInactivityLeak(previousEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
}

return inactivityScores, vals, nil
}
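A hedged sketch of driving the new core.ProcessInactivityScores helper directly; the scores, epoch values and validator flags below are placeholders chosen so both branches of the update are exercised:

// Sketch only, not part of the diff above.
scores := []uint64{3, 5}
vals := []*types.Validator{
	{IsActivePrevEpoch: true, IsPrevEpochTargetAttester: true},  // score moves toward zero
	{IsActivePrevEpoch: true, IsPrevEpochTargetAttester: false}, // score grows by InactivityScoreBias
}
scores, vals, err := core.ProcessInactivityScores(ctx, scores, currentEpoch, previousEpoch, finalizedEpoch, vals)
if err != nil {
	return err
}
_ = scores // the caller writes these back into the beacon state
_ = vals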
@@ -73,6 +73,7 @@ type WriteOnlyBeaconState interface {
WriteOnlyBalances
WriteOnlyCheckpoint
WriteOnlyAttestations
CoreProcessors
SetGenesisTime(val uint64) error
SetGenesisValidatorsRoot(val []byte) error
SetSlot(val types.Slot) error
@@ -235,3 +236,9 @@ type FutureForkStub interface {
NextSyncCommittee() (*ethpb.SyncCommittee, error)
SetNextSyncCommittee(val *ethpb.SyncCommittee) error
}

type CoreProcessors interface {
ProcessInactivityScores(ctx context.Context,
currentEpoch, previousEpoch, finalizedEpoch types.Epoch,
vals []*types.Validator) ([]*types.Validator, error)
}

@@ -57,6 +57,7 @@ go_library(
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/core:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",

@@ -1,7 +1,10 @@
package state_native

import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/core"
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -240,3 +243,26 @@ func (b *BeaconState) SetInactivityScores(val []uint64) error {
b.markFieldAsDirty(nativetypes.InactivityScores)
return nil
}

func (b *BeaconState) ProcessInactivityScores(ctx context.Context,
currentEpoch, previousEpoch, finalizedEpoch types.Epoch, vals []*types.Validator) ([]*types.Validator, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.version == version.Phase0 {
return nil, errNotSupported("ProcessInactivityScores", b.version)
}
scores := b.inactivityScores
if b.sharedFieldReferences[nativetypes.InactivityScores].Refs() > 1 {
scores = b.inactivityScoresVal()
b.sharedFieldReferences[nativetypes.InactivityScores].MinusRef()
b.sharedFieldReferences[nativetypes.InactivityScores] = stateutil.NewRef(1)
}
var err error
scores, vals, err = core.ProcessInactivityScores(ctx, scores, currentEpoch, previousEpoch, finalizedEpoch, vals)
if err != nil {
return nil, err
}
b.inactivityScores = scores
b.markFieldAsDirty(nativetypes.InactivityScores)
return vals, err
}
@@ -199,8 +199,6 @@ func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState,
b.sharedFieldReferences[nativetypes.CurrentEpochAttestations] = stateutil.NewRef(1)

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(b, finalizerCleanup)
return b, nil
}

@@ -289,8 +287,6 @@ func InitializeFromProtoUnsafeAltair(st *ethpb.BeaconStateAltair) (state.BeaconS
b.sharedFieldReferences[nativetypes.InactivityScores] = stateutil.NewRef(1) // New in Altair.

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(b, finalizerCleanup)
return b, nil
}

@@ -381,8 +377,6 @@ func InitializeFromProtoUnsafeBellatrix(st *ethpb.BeaconStateBellatrix) (state.B
b.sharedFieldReferences[nativetypes.LatestExecutionPayloadHeader] = stateutil.NewRef(1) // New in Bellatrix.

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(b, finalizerCleanup)
return b, nil
}

@@ -501,7 +495,31 @@ func (b *BeaconState) Copy() state.BeaconState {

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, finalizerCleanup)
runtime.SetFinalizer(dst, func(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.MinusRef()
if b.stateFieldLeaves[field].FieldReference() != nil {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}

}
for i := range b.dirtyFields {
delete(b.dirtyFields, i)
}
for i := range b.rebuildTrie {
delete(b.rebuildTrie, i)
}
for i := range b.dirtyIndices {
delete(b.dirtyIndices, i)
}
for i := range b.sharedFieldReferences {
delete(b.sharedFieldReferences, i)
}
for i := range b.stateFieldLeaves {
delete(b.stateFieldLeaves, i)
}
state.StateCount.Sub(1)
})
return dst
}

@@ -774,29 +792,3 @@ func (b *BeaconState) resetFieldTrie(index nativetypes.FieldIndex, elements inte
b.dirtyIndices[index] = []uint64{}
return nil
}

func finalizerCleanup(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.MinusRef()
if b.stateFieldLeaves[field].FieldReference() != nil {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}

}
for i := range b.dirtyFields {
delete(b.dirtyFields, i)
}
for i := range b.rebuildTrie {
delete(b.rebuildTrie, i)
}
for i := range b.dirtyIndices {
delete(b.dirtyIndices, i)
}
for i := range b.sharedFieldReferences {
delete(b.sharedFieldReferences, i)
}
for i := range b.stateFieldLeaves {
delete(b.stateFieldLeaves, i)
}
state.StateCount.Sub(1)
}
@@ -33,6 +33,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -157,9 +157,10 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
defer span.End()

val := ctx.Value("migrate")
// First, it checks if the state exists in hot state cache.
cachedState := s.hotStateCache.get(blockRoot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState != nil && !cachedState.IsNil() && val == nil {
return cachedState, nil
}

@@ -168,7 +169,7 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
if err != nil {
return nil, err
}
if ok {
if ok && val == nil {
return cachedInfo.state, nil
}

@@ -222,6 +223,7 @@ func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (stat
if s.isFinalizedRoot(blockRoot) && s.finalizedState() != nil {
return s.finalizedState(), nil
}
val := ctx.Value("migrate")

b, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
@@ -248,7 +250,7 @@ func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (stat
return nil, errors.Wrapf(ErrNoDataForSlot, "slot %d not in db due to checkpoint sync", ps)
}
// Does the state exist in the hot state cache.
if s.hotStateCache.has(parentRoot) {
if s.hotStateCache.has(parentRoot) && val == nil {
return s.hotStateCache.get(parentRoot), nil
}

@@ -262,7 +264,7 @@ func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (stat
if err != nil {
return nil, err
}
if ok {
if ok && val == nil {
return cachedInfo.state, nil
}

@@ -46,58 +46,73 @@ func (c *CanonicalHistory) ReplayerForSlot(target types.Slot) Replayer {
return &stateReplayer{chainer: c, method: forSlot, target: target}
}

func (c *CanonicalHistory) BlockRootForSlot(ctx context.Context, target types.Slot) ([32]byte, error) {
if currentSlot := c.cs.CurrentSlot(); target > currentSlot {
return [32]byte{}, errors.Wrap(ErrFutureSlotRequested, fmt.Sprintf("requested=%d, current=%d", target, currentSlot))
func (c *CanonicalHistory) BlockForSlot(ctx context.Context, target types.Slot) ([32]byte, interfaces.SignedBeaconBlock, error) {
currentSlot := c.cs.CurrentSlot()
if target > currentSlot {
return [32]byte{}, nil, errors.Wrap(ErrFutureSlotRequested, fmt.Sprintf("requested=%d, current=%d", target, currentSlot))
}

slotAbove := target + 1
// don't bother searching for candidate roots when we know the target slot is genesis
for slotAbove > 1 {
for target > 0 {
if ctx.Err() != nil {
return [32]byte{}, errors.Wrap(ctx.Err(), "context canceled during canonicalBlockForSlot")
return [32]byte{}, nil, errors.Wrap(ctx.Err(), "context canceled during canonicalBlockForSlot")
}
slot, roots, err := c.h.HighestRootsBelowSlot(ctx, slotAbove)
hbs, err := c.h.HighestSlotBlocksBelow(ctx, target+1)
if err != nil {
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error finding highest block w/ slot < %d", slotAbove))
return [32]byte{}, nil, errors.Wrap(err, fmt.Sprintf("error finding highest block w/ slot <= %d", target))
}
if len(roots) == 0 {
return [32]byte{}, errors.Wrap(ErrNoBlocksBelowSlot, fmt.Sprintf("slot=%d", slotAbove))
if len(hbs) == 0 {
return [32]byte{}, nil, errors.Wrap(ErrNoBlocksBelowSlot, fmt.Sprintf("slot=%d", target))
}
r, err := c.bestForSlot(ctx, roots)
r, b, err := c.bestForSlot(ctx, hbs)
if err == nil {
// we found a valid, canonical block!
return r, nil
return r, b, nil
}

// we found a block, but it wasn't considered canonical - keep looking
if errors.Is(err, ErrNoCanonicalBlockForSlot) {
// break once we've seen slot 0 (and prevent underflow)
if slot == params.BeaconConfig().GenesisSlot {
if hbs[0].Block().Slot() == params.BeaconConfig().GenesisSlot {
break
}
slotAbove = slot
target = hbs[0].Block().Slot() - 1
continue
}
return [32]byte{}, err
return [32]byte{}, nil, err
}

return c.h.GenesisBlockRoot(ctx)
b, err := c.h.GenesisBlock(ctx)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "db error while retrieving genesis block")
}
root, _, err := c.bestForSlot(ctx, []interfaces.SignedBeaconBlock{b})
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "problem retrieving genesis block")
}
return root, b, nil
}

// bestForSlot encapsulates several messy realities of the underlying db code, looping through multiple blocks,
// performing null/validity checks, and using CanonicalChecker to only pick canonical blocks.
func (c *CanonicalHistory) bestForSlot(ctx context.Context, roots [][32]byte) ([32]byte, error) {
for _, root := range roots {
func (c *CanonicalHistory) bestForSlot(ctx context.Context, hbs []interfaces.SignedBeaconBlock) ([32]byte, interfaces.SignedBeaconBlock, error) {
for _, b := range hbs {
if wrapper.BeaconBlockIsNil(b) != nil {
continue
}
root, err := b.Block().HashTreeRoot()
if err != nil {
// use this error message to wrap a sentinel error for error type matching
wrapped := errors.Wrap(ErrInvalidDBBlock, err.Error())
msg := fmt.Sprintf("could not compute hash_tree_root for block at slot=%d", b.Block().Slot())
return [32]byte{}, nil, errors.Wrap(wrapped, msg)
}
canon, err := c.cc.IsCanonical(ctx, root)
if err != nil {
return [32]byte{}, errors.Wrap(err, "replayer could not check if block is canonical")
return [32]byte{}, nil, errors.Wrap(err, "replayer could not check if block is canonical")
}
if canon {
return root, nil
return root, b, nil
}
}
return [32]byte{}, errors.Wrap(ErrNoCanonicalBlockForSlot, "no good block for slot")
return [32]byte{}, nil, errors.Wrap(ErrNoCanonicalBlockForSlot, "no good block for slot")
}

// ChainForSlot creates a value that satisfies the Replayer interface via db queries
@@ -107,13 +122,9 @@ func (c *CanonicalHistory) bestForSlot(ctx context.Context, roots [][32]byte) ([
func (c *CanonicalHistory) chainForSlot(ctx context.Context, target types.Slot) (state.BeaconState, []interfaces.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "canonicalChainer.chainForSlot")
defer span.End()
r, err := c.BlockRootForSlot(ctx, target)
_, b, err := c.BlockForSlot(ctx, target)
if err != nil {
return nil, nil, errors.Wrapf(err, "no canonical block root found below slot=%d", target)
}
b, err := c.h.Block(ctx, r)
if err != nil {
return nil, nil, errors.Wrapf(err, "unable to retrieve canonical block for slot, root=%#x", r)
return nil, nil, errors.Wrap(err, fmt.Sprintf("unable to find replay data for slot=%d", target))
}
s, descendants, err := c.ancestorChain(ctx, b)
if err != nil {
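For orientation, a sketch of calling the reshaped BlockForSlot, which now hands back the block alongside its root so callers no longer need a second db lookup (names taken from the hunk above, error handling trimmed):

// Sketch only, not part of the diff above.
root, blk, err := ch.BlockForSlot(ctx, targetSlot)
if err != nil {
	return err
}
log.Printf("canonical block at or below slot %d: root=%#x, slot=%d", targetSlot, root, blk.Block().Slot())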
@@ -12,6 +12,8 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)

@@ -19,7 +21,7 @@ func TestBlockForSlotFuture(t *testing.T) {
ch := &CanonicalHistory{
cs: &mockCurrentSlotter{Slot: 0},
}
_, err := ch.BlockRootForSlot(context.Background(), 1)
_, _, err := ch.BlockForSlot(context.Background(), 1)
require.ErrorIs(t, err, ErrFutureSlotRequested)
}

@@ -32,54 +34,84 @@ func TestChainForSlotFuture(t *testing.T) {
}

func TestBestForSlot(t *testing.T) {
nilBlock, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
require.NoError(t, err)
nilBody, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}})
require.NoError(t, err)
derp := errors.New("fake hash tree root method no hash good")
badHTR := &mock.SignedBeaconBlock{BeaconBlock: &mock.BeaconBlock{HtrErr: derp, BeaconBlockBody: &mock.BeaconBlockBody{}}}
var goodHTR [32]byte
copy(goodHTR[:], []byte{23})
var betterHTR [32]byte
copy(betterHTR[:], []byte{42})
good := &mock.SignedBeaconBlock{BeaconBlock: &mock.BeaconBlock{BeaconBlockBody: &mock.BeaconBlockBody{}, Htr: goodHTR}}
better := &mock.SignedBeaconBlock{BeaconBlock: &mock.BeaconBlock{BeaconBlockBody: &mock.BeaconBlockBody{}, Htr: betterHTR}}

cases := []struct {
name string
err error
blocks []interfaces.SignedBeaconBlock
roots [][32]byte
best interfaces.SignedBeaconBlock
root [32]byte
cc CanonicalChecker
}{
{
name: "empty list",
err: ErrNoCanonicalBlockForSlot,
roots: [][32]byte{},
name: "empty list",
err: ErrNoCanonicalBlockForSlot,
blocks: []interfaces.SignedBeaconBlock{},
},
{
name: "IsCanonical fail",
roots: [][32]byte{goodHTR, betterHTR},
cc: &mockCanonicalChecker{is: true, err: derp},
err: derp,
name: "empty SignedBeaconBlock",
err: ErrNoCanonicalBlockForSlot,
blocks: []interfaces.SignedBeaconBlock{nil},
},
{
name: "all non-canonical",
err: ErrNoCanonicalBlockForSlot,
roots: [][32]byte{goodHTR, betterHTR},
cc: &mockCanonicalChecker{is: false},
name: "empty BeaconBlock",
err: ErrNoCanonicalBlockForSlot,
blocks: []interfaces.SignedBeaconBlock{nilBlock},
},
{
name: "one canonical",
cc: &mockCanonicalChecker{is: true},
root: goodHTR,
roots: [][32]byte{goodHTR},
name: "empty BeaconBlockBody",
err: ErrNoCanonicalBlockForSlot,
blocks: []interfaces.SignedBeaconBlock{nilBody},
},
{
name: "all canonical",
cc: &mockCanonicalChecker{is: true},
root: betterHTR,
roots: [][32]byte{betterHTR, goodHTR},
name: "bad HTR",
err: ErrInvalidDBBlock,
blocks: []interfaces.SignedBeaconBlock{badHTR},
},
{
name: "first wins",
cc: &mockCanonicalChecker{is: true},
root: goodHTR,
roots: [][32]byte{goodHTR, betterHTR},
name: "IsCanonical fail",
blocks: []interfaces.SignedBeaconBlock{good, better},
cc: &mockCanonicalChecker{is: true, err: derp},
err: derp,
},
{
name: "all non-canonical",
err: ErrNoCanonicalBlockForSlot,
blocks: []interfaces.SignedBeaconBlock{good, better},
cc: &mockCanonicalChecker{is: false},
},
{
name: "one canonical",
blocks: []interfaces.SignedBeaconBlock{good},
cc: &mockCanonicalChecker{is: true},
root: goodHTR,
best: good,
},
{
name: "all canonical",
blocks: []interfaces.SignedBeaconBlock{better, good},
cc: &mockCanonicalChecker{is: true},
root: betterHTR,
best: better,
},
{
name: "first wins",
blocks: []interfaces.SignedBeaconBlock{good, better},
cc: &mockCanonicalChecker{is: true},
root: goodHTR,
best: good,
},
}
for _, c := range cases {
@@ -89,9 +121,10 @@ func TestBestForSlot(t *testing.T) {
|
||||
chk = c.cc
|
||||
}
|
||||
ch := &CanonicalHistory{cc: chk}
|
||||
r, err := ch.bestForSlot(context.Background(), c.roots)
|
||||
r, b, err := ch.bestForSlot(context.Background(), c.blocks)
|
||||
if c.err == nil {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.best, b)
|
||||
require.Equal(t, c.root, r)
|
||||
} else {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
@@ -131,11 +164,13 @@ func TestCanonicalBlockForSlotHappy(t *testing.T) {
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
_, rs, err := hist.HighestRootsBelowSlot(ctx, c.slot+1)
|
||||
bs, err := hist.HighestSlotBlocksBelow(ctx, c.slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(rs), 1)
|
||||
require.Equal(t, hist.slotMap[c.highest], rs[0])
|
||||
cr, err := ch.BlockRootForSlot(ctx, c.slot)
|
||||
require.Equal(t, len(bs), 1)
|
||||
r, err := bs[0].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hist.slotMap[c.highest], r)
|
||||
cr, _, err := ch.BlockForSlot(ctx, c.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hist.slotMap[c.canon], cr)
|
||||
})
|
||||
@@ -152,49 +187,47 @@ func TestCanonicalBlockForSlotNonHappy(t *testing.T) {
|
||||
}
hist := newMockHistory(t, specs, end+1)

genesis, err := hist.GenesisBlockRoot(ctx)
require.NoError(t, err)
slotOrderObserved := make([]types.Slot, 0)
derp := errors.New("HighestRootsBelowSlot don't work")
derp := errors.New("HighestSlotBlocksBelow don't work")
// since only the end block and genesis are canonical, once the slot drops below
// end, we should always get genesis
cases := []struct {
name string
slot types.Slot
canon CanonicalChecker
overrideHighest func(context.Context, types.Slot) (types.Slot, [][32]byte, error)
overrideHighest func(context.Context, types.Slot) ([]interfaces.SignedBeaconBlock, error)
slotOrderExpected []types.Slot
err error
root [32]byte
}{
{
name: "HighestRootsBelowSlot not called for genesis",
overrideHighest: func(_ context.Context, _ types.Slot) (types.Slot, [][32]byte, error) {
return 0, [][32]byte{}, derp
name: "HighestSlotBlocksBelow not called for genesis",
overrideHighest: func(_ context.Context, _ types.Slot) ([]interfaces.SignedBeaconBlock, error) {
return nil, derp
},
root: hist.slotMap[0],
},
{
name: "wrapped error from HighestRootsBelowSlot returned",
name: "wrapped error from HighestSlotBlocksBelow returned",
err: derp,
overrideHighest: func(_ context.Context, _ types.Slot) (types.Slot, [][32]byte, error) {
return 0, [][32]byte{}, derp
overrideHighest: func(_ context.Context, _ types.Slot) ([]interfaces.SignedBeaconBlock, error) {
return nil, derp
},
slot: end,
},
{
name: "HighestRootsBelowSlot empty list",
name: "HighestSlotBlocksBelow empty list",
err: ErrNoBlocksBelowSlot,
overrideHighest: func(_ context.Context, _ types.Slot) (types.Slot, [][32]byte, error) {
return 0, [][32]byte{}, nil
overrideHighest: func(_ context.Context, _ types.Slot) ([]interfaces.SignedBeaconBlock, error) {
return []interfaces.SignedBeaconBlock{}, nil
},
slot: end,
},
{
name: "HighestRootsBelowSlot no canonical",
name: "HighestSlotBlocksBelow no canonical",
err: ErrNoCanonicalBlockForSlot,
canon: &mockCanonicalChecker{is: false},
slot: end,
root: genesis,
},
{
name: "slot ordering correct - only genesis canonical",
@@ -204,11 +237,11 @@ func TestCanonicalBlockForSlotNonHappy(t *testing.T) {
}
return false, nil
}},
overrideHighest: func(_ context.Context, s types.Slot) (types.Slot, [][32]byte, error) {
overrideHighest: func(_ context.Context, s types.Slot) ([]interfaces.SignedBeaconBlock, error) {
slotOrderObserved = append(slotOrderObserved, s)
// this allows the mock HighestRootsBelowSlot to continue to execute now that we've recorded
// this allows the mock HighestSlotBlocksBelow to continue to execute now that we've recorded
// the slot in our channel
return 0, nil, errFallThroughOverride
return nil, errFallThroughOverride
},
slotOrderExpected: []types.Slot{156, 155, 150, 100},
slot: end,
@@ -222,11 +255,11 @@ func TestCanonicalBlockForSlotNonHappy(t *testing.T) {
}
return false, nil
}},
overrideHighest: func(_ context.Context, s types.Slot) (types.Slot, [][32]byte, error) {
overrideHighest: func(_ context.Context, s types.Slot) ([]interfaces.SignedBeaconBlock, error) {
slotOrderObserved = append(slotOrderObserved, s)
// this allows the mock HighestRootsBelowSlot to continue to execute now that we've recorded
// this allows the mock HighestSlotBlocksBelow to continue to execute now that we've recorded
// the slot in our channel
return 0, nil, errFallThroughOverride
return nil, errFallThroughOverride
},
slotOrderExpected: []types.Slot{156, 155, 150},
slot: end,
@@ -241,14 +274,14 @@ func TestCanonicalBlockForSlotNonHappy(t *testing.T) {
}
ch := &CanonicalHistory{h: hist, cc: canon, cs: hist}
hist.overrideHighestSlotBlocksBelow = c.overrideHighest
r, err := ch.BlockRootForSlot(ctx, c.slot)
r, _, err := ch.BlockForSlot(ctx, c.slot)
if c.err == nil {
require.NoError(t, err)
} else {
require.ErrorIs(t, err, c.err)
}
if len(c.slotOrderExpected) > 0 {
require.Equal(t, len(c.slotOrderExpected), len(slotOrderObserved), "HighestRootsBelowSlot not called the expected number of times")
require.Equal(t, len(c.slotOrderExpected), len(slotOrderObserved), "HighestSlotBlocksBelow not called the expected number of times")
for i := range c.slotOrderExpected {
require.Equal(t, c.slotOrderExpected[i], slotOrderObserved[i])
}

@@ -3,7 +3,6 @@ package stategen
import (
"context"
"encoding/hex"
"fmt"

"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -39,11 +38,10 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
}

if slot%s.slotsPerArchivedPoint == 0 && slot != 0 {
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
if err != nil {
return fmt.Errorf("could not get epoch boundary state for slot %d", slot)
cached, exists := &rootStateInfo{}, false
if s.slotsPerArchivedPoint == 10000 {
exists = true
}

var aRoot [32]byte
var aState state.BeaconState

@@ -55,20 +53,25 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
aRoot = cached.root
aState = cached.state
} else {
_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
blks, err := s.beaconDB.HighestSlotBlocksBelow(ctx, slot)
if err != nil {
return err
}
// Given the block has been finalized, the db should not have more than one block in a given slot.
// We should error out when this happens.
if len(roots) != 1 {
if len(blks) != 1 {
return errUnknownBlock
}
aRoot = roots[0]
missingRoot, err := blks[0].Block().HashTreeRoot()
if err != nil {
return err
}
aRoot = missingRoot
// There's no need to generate the state if the state already exists in the DB.
// We can skip saving the state.
if !s.beaconDB.HasState(ctx, aRoot) {
aState, err = s.StateByRoot(ctx, aRoot)
ctx = context.WithValue(ctx, "migrate", "yes")
aState, err = s.StateByRoot(ctx, missingRoot)
if err != nil {
return err
}

@@ -81,7 +81,7 @@ type mockHistory struct {
states map[[32]byte]state.BeaconState
hiddenStates map[[32]byte]state.BeaconState
current types.Slot
overrideHighestSlotBlocksBelow func(context.Context, types.Slot) (types.Slot, [][32]byte, error)
overrideHighestSlotBlocksBelow func(context.Context, types.Slot) ([]interfaces.SignedBeaconBlock, error)
}

type slotList []types.Slot
@@ -98,13 +98,13 @@ func (m slotList) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}

var errFallThroughOverride = errors.New("override yielding control back to real HighestRootsBelowSlot")
var errFallThroughOverride = errors.New("override yielding control back to real HighestSlotBlocksBelow")

func (m *mockHistory) HighestRootsBelowSlot(_ context.Context, slot types.Slot) (types.Slot, [][32]byte, error) {
func (m *mockHistory) HighestSlotBlocksBelow(_ context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
if m.overrideHighestSlotBlocksBelow != nil {
s, r, err := m.overrideHighestSlotBlocksBelow(context.Background(), slot)
s, err := m.overrideHighestSlotBlocksBelow(context.Background(), slot)
if !errors.Is(err, errFallThroughOverride) {
return s, r, err
return s, err
}
}
if len(m.slotIndex) == 0 && len(m.slotMap) > 0 {
@@ -115,20 +115,20 @@ func (m *mockHistory) HighestRootsBelowSlot(_ context.Context, slot types.Slot)
}
for _, s := range m.slotIndex {
if s < slot {
return s, [][32]byte{m.slotMap[s]}, nil
return []interfaces.SignedBeaconBlock{m.blocks[m.slotMap[s]]}, nil
}
}
return 0, [][32]byte{}, nil
return []interfaces.SignedBeaconBlock{}, nil
}

var errGenesisBlockNotFound = errors.New("canonical genesis block not found in db")

func (m *mockHistory) GenesisBlockRoot(_ context.Context) ([32]byte, error) {
func (m *mockHistory) GenesisBlock(_ context.Context) (interfaces.SignedBeaconBlock, error) {
genesisRoot, ok := m.slotMap[0]
if !ok {
return [32]byte{}, errGenesisBlockNotFound
return nil, errGenesisBlockNotFound
}
return genesisRoot, nil
return m.blocks[genesisRoot], nil
}

func (m *mockHistory) Block(_ context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error) {

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
"github.com/prysmaticlabs/prysm/runtime/version"
@@ -41,6 +42,11 @@ func (_ *State) ReplayBlocks(
"endSlot": targetSlot,
"diff": targetSlot - state.Slot(),
}).Debug("Replaying state")
val := ctx.Value("migrate")
if val != nil {
log.Debug("Migrating state and replaying blocks")
}
gen := rand.NewDeterministicGenerator()
// The input block list is sorted in decreasing slots order.
if len(signed) > 0 {
for i := len(signed) - 1; i >= 0; i-- {
@@ -54,6 +60,10 @@ func (_ *State) ReplayBlocks(
if state.Slot() >= signed[i].Block().Slot() {
continue
}
if val != nil {
nm := gen.Int63n(5500)
time.Sleep(time.Duration(nm) * time.Millisecond)
}
state, err = executeStateTransitionStateGen(ctx, state, signed[i])
if err != nil {
return nil, err
@@ -143,9 +153,6 @@ func executeStateTransitionStateGen(
state state.BeaconState,
signed interfaces.SignedBeaconBlock,
) (state.BeaconState, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
if err := wrapper.BeaconBlockIsNil(signed); err != nil {
return nil, err
}

@@ -27,8 +27,8 @@ const (

// HistoryAccessor describes the minimum set of database methods needed to support the ReplayerBuilder.
type HistoryAccessor interface {
HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (types.Slot, [][32]byte, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
}

@@ -33,6 +33,7 @@ go_library(
],
deps = [
"//beacon-chain/core/transition/stateutils:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

@@ -4,10 +4,12 @@ import (
"encoding/binary"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
pmath "github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

@@ -84,34 +86,26 @@ func Uint64ListRootWithRegistryLimit(balances []uint64) ([32]byte, error) {

// PackUint64IntoChunks packs a list of uint64 values into 32 byte roots.
func PackUint64IntoChunks(vals []uint64) ([][32]byte, error) {
// Initialize how many uint64 values we can pack
// into a single chunk(32 bytes). Each uint64 value
// would take up 8 bytes.
numOfElems := 4
sizeOfElem := 32 / numOfElems
// Determine total number of chunks to be
// allocated to provided list of unsigned
// 64-bit integers.
numOfChunks := len(vals) / numOfElems
// Add an extra chunk if the list size
// is not a perfect multiple of the number
// of elements.
if len(vals)%numOfElems != 0 {
numOfElems, err := types.Balances.ElemsInChunk()
if err != nil {
return nil, err
}
iNumOfElems, err := pmath.Int(numOfElems)
if err != nil {
return nil, err
}
numOfChunks := len(vals) / iNumOfElems
if len(vals)%iNumOfElems != 0 {
numOfChunks++
}
chunkList := make([][32]byte, numOfChunks)
balanceChunks := make([][32]byte, numOfChunks)
for idx, b := range vals {
// In order to determine how to pack in the uint64 value by index into
// our chunk list we need to determine a few things.
// 1) The chunk which the particular uint64 value corresponds to.
// 2) The position of the value in the chunk itself.
//
// Once we have determined these 2 values we can simply find the correct
// section of contiguous bytes to insert the value in the chunk.
chunkIdx := idx / numOfElems
idxInChunk := idx % numOfElems
chunkPos := idxInChunk * sizeOfElem
binary.LittleEndian.PutUint64(chunkList[chunkIdx][chunkPos:chunkPos+sizeOfElem], b)
startIdx := idx / iNumOfElems
chunkIdx := idx % iNumOfElems
sizeOfElem := 32 / iNumOfElems
chunkPos := chunkIdx * sizeOfElem
binary.LittleEndian.PutUint64(balanceChunks[startIdx][chunkPos:chunkPos+8], b)
}
return chunkList, nil

return balanceChunks, nil
}

@@ -77,8 +77,6 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(b, finalizerCleanup)
return b, nil
}

@@ -176,7 +174,24 @@ func (b *BeaconState) Copy() state.BeaconState {

state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, finalizerCleanup)
runtime.SetFinalizer(dst, func(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.MinusRef()
if b.stateFieldLeaves[field].FieldReference() != nil {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}

}
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)
delete(b.dirtyIndices, field)
delete(b.dirtyFields, field)
delete(b.sharedFieldReferences, field)
delete(b.stateFieldLeaves, field)
}
state.StateCount.Sub(1)
})
return dst
}

@@ -424,22 +439,3 @@ func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{
b.dirtyIndices[index] = []uint64{}
return nil
}

func finalizerCleanup(b *BeaconState) {
fieldCount := params.BeaconConfig().BeaconStateFieldCount
for field, v := range b.sharedFieldReferences {
v.MinusRef()
if b.stateFieldLeaves[field].FieldReference() != nil {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}
}
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)
delete(b.dirtyIndices, field)
delete(b.dirtyFields, field)
delete(b.sharedFieldReferences, field)
delete(b.stateFieldLeaves, field)
}
state.StateCount.Sub(1)
}

@@ -1,7 +1,10 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -59,3 +62,8 @@ func (*BeaconState) SetInactivityScores(_ []uint64) error {
|
||||
func (*BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
func (b *BeaconState) ProcessInactivityScores(ctx context.Context,
|
||||
currentEpoch, previousEpoch, finalizedEpoch types.Epoch, vals []*types.Validator) ([]*types.Validator, error) {
|
||||
return nil, errors.New("ProcessInactivityScores is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -35,6 +35,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/core:go_default_library",
|
||||
"//beacon-chain/state/fieldtrie:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -263,3 +266,26 @@ func (b *BeaconState) SetInactivityScores(val []uint64) error {
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) ProcessInactivityScores(ctx context.Context,
|
||||
currentEpoch, previousEpoch, finalizedEpoch types.Epoch, vals []*types.Validator) ([]*types.Validator, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
scores := b.state.InactivityScores
|
||||
if b.sharedFieldReferences[inactivityScores].Refs() > 1 {
|
||||
scores = b.inactivityScores()
|
||||
b.sharedFieldReferences[inactivityScores].MinusRef()
|
||||
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1)
|
||||
}
|
||||
var err error
|
||||
scores, vals, err = core.ProcessInactivityScores(ctx, scores, currentEpoch, previousEpoch, finalizedEpoch, vals)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.state.InactivityScores = scores
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return vals, err
|
||||
}
|
||||
|
||||
@@ -78,8 +78,6 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (state.BeaconState,
|
||||
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
|
||||
|
||||
state.StateCount.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(b, finalizerCleanup)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
@@ -181,7 +179,23 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
|
||||
state.StateCount.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(dst, finalizerCleanup)
|
||||
runtime.SetFinalizer(dst, func(b *BeaconState) {
|
||||
for field, v := range b.sharedFieldReferences {
|
||||
v.MinusRef()
|
||||
if b.stateFieldLeaves[field].FieldReference() != nil {
|
||||
b.stateFieldLeaves[field].FieldReference().MinusRef()
|
||||
}
|
||||
}
|
||||
for i := 0; i < fieldCount; i++ {
|
||||
field := types.FieldIndex(i)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
delete(b.dirtyIndices, field)
|
||||
delete(b.dirtyFields, field)
|
||||
delete(b.sharedFieldReferences, field)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
}
|
||||
state.StateCount.Sub(1)
|
||||
})
|
||||
|
||||
return dst
|
||||
}
|
||||
@@ -412,22 +426,3 @@ func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{
|
||||
b.dirtyIndices[index] = []uint64{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func finalizerCleanup(b *BeaconState) {
|
||||
fieldCount := params.BeaconConfig().BeaconStateAltairFieldCount
|
||||
for field, v := range b.sharedFieldReferences {
|
||||
v.MinusRef()
|
||||
if b.stateFieldLeaves[field].FieldReference() != nil {
|
||||
b.stateFieldLeaves[field].FieldReference().MinusRef()
|
||||
}
|
||||
}
|
||||
for i := 0; i < fieldCount; i++ {
|
||||
field := types.FieldIndex(i)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
delete(b.dirtyIndices, field)
|
||||
delete(b.dirtyFields, field)
|
||||
delete(b.sharedFieldReferences, field)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
}
|
||||
state.StateCount.Sub(1)
|
||||
}
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("")
|
||||
uint64(117)
|
||||
@@ -37,6 +37,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/core:go_default_library",
|
||||
"//beacon-chain/state/fieldtrie:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -263,3 +266,26 @@ func (b *BeaconState) SetInactivityScores(val []uint64) error {
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) ProcessInactivityScores(ctx context.Context,
|
||||
currentEpoch, previousEpoch, finalizedEpoch types.Epoch, vals []*types.Validator) ([]*types.Validator, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
scores := b.state.InactivityScores
|
||||
if b.sharedFieldReferences[inactivityScores].Refs() > 1 {
|
||||
scores = b.inactivityScores()
|
||||
b.sharedFieldReferences[inactivityScores].MinusRef()
|
||||
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1)
|
||||
}
|
||||
var err error
|
||||
scores, vals, err = core.ProcessInactivityScores(ctx, scores, currentEpoch, previousEpoch, finalizedEpoch, vals)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.state.InactivityScores = scores
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return vals, err
|
||||
}
|
||||
|
||||
@@ -78,8 +78,6 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
|
||||
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[latestExecutionPayloadHeader] = stateutil.NewRef(1) // New in Bellatrix.
|
||||
state.StateCount.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(b, finalizerCleanup)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
@@ -181,7 +179,23 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
}
|
||||
state.StateCount.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(dst, finalizerCleanup)
|
||||
runtime.SetFinalizer(dst, func(b *BeaconState) {
|
||||
for field, v := range b.sharedFieldReferences {
|
||||
v.MinusRef()
|
||||
if b.stateFieldLeaves[field].FieldReference() != nil {
|
||||
b.stateFieldLeaves[field].FieldReference().MinusRef()
|
||||
}
|
||||
}
|
||||
for i := 0; i < fieldCount; i++ {
|
||||
field := types.FieldIndex(i)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
delete(b.dirtyIndices, field)
|
||||
delete(b.dirtyFields, field)
|
||||
delete(b.sharedFieldReferences, field)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
}
|
||||
state.StateCount.Sub(1)
|
||||
})
|
||||
|
||||
return dst
|
||||
}
|
||||
@@ -407,22 +421,3 @@ func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{
|
||||
b.dirtyIndices[index] = []uint64{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func finalizerCleanup(b *BeaconState) {
|
||||
fieldCount := params.BeaconConfig().BeaconStateBellatrixFieldCount
|
||||
for field, v := range b.sharedFieldReferences {
|
||||
v.MinusRef()
|
||||
if b.stateFieldLeaves[field].FieldReference() != nil {
|
||||
b.stateFieldLeaves[field].FieldReference().MinusRef()
|
||||
}
|
||||
}
|
||||
for i := 0; i < fieldCount; i++ {
|
||||
field := types.FieldIndex(i)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
delete(b.dirtyIndices, field)
|
||||
delete(b.dirtyFields, field)
|
||||
delete(b.sharedFieldReferences, field)
|
||||
delete(b.stateFieldLeaves, field)
|
||||
}
|
||||
state.StateCount.Sub(1)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//cmd:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ package flags
|
||||
import (
|
||||
"strings"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
@@ -214,7 +215,7 @@ var (
|
||||
SuggestedFeeRecipient = &cli.StringFlag{
|
||||
Name: "suggested-fee-recipient",
|
||||
Usage: "Post bellatrix, this address will receive the transaction fees produced by any blocks from this node. Default to junk whilst bellatrix is in development state. Validator client can override this value through the preparebeaconproposer api.",
|
||||
Value: params.BeaconConfig().EthBurnAddressHex,
|
||||
Value: fieldparams.EthBurnAddressHex,
|
||||
}
|
||||
// TerminalTotalDifficultyOverride specifies the total difficulty to manual overrides the `TERMINAL_TOTAL_DIFFICULTY` parameter.
|
||||
TerminalTotalDifficultyOverride = &cli.StringFlag{
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["jwt.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/cmd/beacon-chain/jwt",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//cmd:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["jwt_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//cmd:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,71 +0,0 @@
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
secretFileName = "jwt.hex"
|
||||
)
|
||||
|
||||
var Commands = &cli.Command{
|
||||
Name: "generate-auth-secret",
|
||||
Usage: "creates a random, 32 byte hex string in a plaintext file to be used for authenticating JSON-RPC requests. If no --output-file flag is defined, the file will be created in the current working directory",
|
||||
Description: `creates a random, 32 byte hex string in a plaintext file to be used for authenticating JSON-RPC requests. If no --output-file flag is defined, the file will be created in the current working directory`,
|
||||
Flags: cmd.WrapFlags([]cli.Flag{
|
||||
cmd.JwtOutputFileFlag,
|
||||
}),
|
||||
Action: generateAuthSecretInFile,
|
||||
}
|
||||
|
||||
func generateAuthSecretInFile(c *cli.Context) error {
|
||||
fileName := secretFileName
|
||||
specifiedFilePath := c.String(cmd.JwtOutputFileFlag.Name)
|
||||
if len(specifiedFilePath) > 0 {
|
||||
fileName = specifiedFilePath
|
||||
}
|
||||
var err error
|
||||
fileName, err = file.ExpandPath(fileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileDir := filepath.Dir(fileName)
|
||||
exists, err := file.HasDir(fileDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
if err := file.MkdirAll(fileDir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
secret, err := generateRandomHexString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.WriteFile(fileName, []byte(secret)); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Successfully wrote JSON-RPC authentication secret to file %s", fileName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateRandomHexString() (string, error) {
|
||||
secret := make([]byte, 32)
|
||||
randGen := rand.NewGenerator()
|
||||
n, err := randGen.Read(secret)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if n <= 0 {
|
||||
return "", errors.New("rand: unexpected length")
|
||||
}
|
||||
return hexutil.Encode(secret), nil
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func Test_generateJWTSecret(t *testing.T) {
|
||||
t.Run("command should be available", func(t *testing.T) {
|
||||
generateJwtCommand := Commands
|
||||
require.Equal(t, true, generateJwtCommand.Name == "generate-auth-secret")
|
||||
})
|
||||
t.Run("should create proper file in current directory", func(t *testing.T) {
|
||||
require.NoError(t, os.RemoveAll(secretFileName))
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, os.RemoveAll(secretFileName))
|
||||
})
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
err := generateAuthSecretInFile(cliCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We check the file has the contents we expect.
|
||||
checkAuthFileIntegrity(t, secretFileName)
|
||||
})
|
||||
t.Run("should create proper file in specified folder", func(t *testing.T) {
|
||||
customOutput := filepath.Join("data", "item.txt")
|
||||
require.NoError(t, os.RemoveAll(filepath.Dir(customOutput)))
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, os.RemoveAll(filepath.Dir(customOutput)))
|
||||
})
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String(cmd.JwtOutputFileFlag.Name, customOutput, "")
|
||||
require.NoError(t, set.Set(cmd.JwtOutputFileFlag.Name, customOutput))
|
||||
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
err := generateAuthSecretInFile(cliCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We check the file has the contents we expect.
|
||||
checkAuthFileIntegrity(t, customOutput)
|
||||
})
|
||||
t.Run("creates proper file in nested specified folder", func(t *testing.T) {
|
||||
rootDirectory := "data"
|
||||
customOutputPath := filepath.Join(rootDirectory, "nest", "nested", "item.txt")
|
||||
require.NoError(t, os.RemoveAll(filepath.Dir(customOutputPath)))
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, os.RemoveAll(rootDirectory))
|
||||
_, err := os.Stat(customOutputPath)
|
||||
require.Equal(t, true, err != nil)
|
||||
})
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String(cmd.JwtOutputFileFlag.Name, customOutputPath, "")
|
||||
require.NoError(t, set.Set(cmd.JwtOutputFileFlag.Name, customOutputPath))
|
||||
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
err := generateAuthSecretInFile(cliCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We check the file has the contents we expect.
|
||||
checkAuthFileIntegrity(t, customOutputPath)
|
||||
})
|
||||
}
|
||||
|
||||
func checkAuthFileIntegrity(t testing.TB, fPath string) {
|
||||
fileInfo, err := os.Stat(fPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, fileInfo != nil)
|
||||
|
||||
enc, err := file.ReadFileAsBytes(fPath)
|
||||
require.NoError(t, err)
|
||||
decoded, err := hexutil.Decode(string(enc))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, len(decoded))
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package cmd defines the command line flags for the shared utilities.
|
||||
// Package cmd defines the command line flags for the shared utlities.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
@@ -255,12 +255,6 @@ var (
|
||||
Usage: "Specifies the timeout value for API requests in seconds",
|
||||
Value: 120,
|
||||
}
|
||||
// JwtOutputFileFlag specifies the JWT file path that gets generated into when invoked by generate-jwt-secret.
|
||||
JwtOutputFileFlag = &cli.StringFlag{
|
||||
Name: "output-file",
|
||||
Usage: "Target file path for outputting a generated JWT secret to be used for JSON-RPC authentication",
|
||||
Aliases: []string{"o"},
|
||||
}
|
||||
)
|
||||
|
||||
// LoadFlagsFromConfig sets flags values from config file if ConfigFileFlag is set.
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"benchmark.go",
|
||||
"generate.go",
|
||||
"ir.go",
|
||||
"main.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/cmd/ssz",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//sszgen:go_default_library",
|
||||
"//sszgen/backend:go_default_library",
|
||||
"//sszgen/testutil:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "ssz",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,315 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
fssz "github.com/ferranbt/fastssz"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/urfave/cli/v2"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
pbbeacon "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
pbethv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
pbethv1alpha1 "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
const methodsetMethodical = "methodical"
|
||||
const methodsetFast = "fastssz"
|
||||
|
||||
var methodset string
|
||||
var benchmarkRepeat int
|
||||
var skipList string
|
||||
var benchmark = &cli.Command{
|
||||
Name: "benchmark",
|
||||
ArgsUsage: "<path to spectest repository>",
|
||||
Aliases: []string{"bench"},
|
||||
Usage: "Benchmark for comparing fastssz with methodical to generate profiling data",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "methodset",
|
||||
Value: "",
|
||||
Usage: "which methodset to evaluate, \"fastssz\" or \"methodical\"",
|
||||
Destination: &methodset,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "skip-list",
|
||||
Value: "",
|
||||
Usage: "comma-separated list of types to skip (useful for excluding that big ole BeaconState).",
|
||||
Destination: &skipList,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "repeat",
|
||||
Usage: "how many times to repeat each unmarshal/marshal operation (increase for more stability)",
|
||||
Destination: &benchmarkRepeat,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
// validate args
|
||||
spectestPath := c.Args().Get(0)
|
||||
if spectestPath == "" {
|
||||
cli.ShowCommandHelp(c, "benchmark")
|
||||
return fmt.Errorf("error: missing required <path to spectest repository> argument")
|
||||
}
|
||||
if methodset != methodsetMethodical && methodset != methodsetFast {
|
||||
cli.ShowCommandHelp(c, "benchmark")
|
||||
return fmt.Errorf("error: --methodset must be equal to \"fastssz\" or \"methodical\"")
|
||||
}
|
||||
|
||||
// initialize profiling, profilePath will fail if spectest path is weird
|
||||
ppath, err := profilePath(spectestPath, methodset)
|
||||
f, err := os.Create(ppath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
|
||||
skip := make(map[string]struct{})
|
||||
if skipList != "" {
|
||||
skipNames := strings.Split(skipList, ",")
|
||||
for _, s := range skipNames {
|
||||
skip[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
// use regex to parse test cases out of a dirwalk
|
||||
tcs, err := findTestCases(spectestPath, skip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Found %d test cases", len(tcs))
|
||||
for _, tc := range tcs {
|
||||
err := executeTestCase(tc, methodset, benchmarkRepeat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func profilePath(path string, methodset string) (string, error) {
|
||||
pre := regexp.MustCompile(`.*\/tests\/(mainnet|minimal)\/(altair|merge|phase0)\/ssz_static`)
|
||||
parts := pre.FindStringSubmatch(path)
|
||||
if len(parts) != 3 {
|
||||
return "", fmt.Errorf("unfamiliar spectest path, can't determine test configuration and phase")
|
||||
}
|
||||
return fmt.Sprintf("cpu-%s-%s-%s.%s.pprof", methodset, parts[1], parts[2], time.Now().Format("20060102-150405")), nil
|
||||
}
|
||||
|
||||
func executeTestCase(tc *TestCase, methodset string, repeat int) error {
|
||||
b, err := tc.MarshaledBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tys := make([]pbinit, 0)
|
||||
for _, c := range []map[string]pbinit{casesBeaconP2pV1,casesV1,casesV1Alpha1} {
|
||||
pi, ok := c[tc.typeName]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
tys = append(tys, pi)
|
||||
}
|
||||
for i := 0; i <= repeat; i++ {
|
||||
for _, fn := range tys {
|
||||
essz := fn()
|
||||
if methodset == methodsetFast {
|
||||
err := essz.UnmarshalSSZ(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = essz.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = essz.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if methodset == methodsetMethodical {
|
||||
err := essz.XXUnmarshalSSZ(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = essz.XXMarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = essz.XXHashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func findTestCases(path string, skip map[string]struct{}) ([]*TestCase, error) {
|
||||
var re = regexp.MustCompile(`.*\/tests\/(mainnet|minimal)\/(altair|merge|phase0)\/ssz_static\/(.*)\/ssz_random\/(case_\d+)`)
|
||||
tcs := make([]*TestCase, 0)
|
||||
testCaseFromPath := func (path string, d fs.DirEntry, err error) error {
|
||||
if !d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
parts := re.FindStringSubmatch(path)
|
||||
if len(parts) != 5 {
|
||||
return nil
|
||||
}
|
||||
tc := &TestCase{
|
||||
path: path,
|
||||
config: parts[1],
|
||||
phase: parts[2],
|
||||
typeName: parts[3],
|
||||
caseId: parts[4],
|
||||
}
|
||||
if tc.config == "" || tc.phase == "" || tc.typeName == "" || tc.caseId == "" {
|
||||
return nil
|
||||
}
|
||||
if _, ok := skip[tc.typeName]; ok {
|
||||
return nil
|
||||
}
|
||||
tcs = append(tcs, tc)
|
||||
return nil
|
||||
}
|
||||
err := filepath.WalkDir(path, testCaseFromPath)
|
||||
|
||||
return tcs, err
|
||||
}
|
||||
|
||||
type SSZRoots struct {
|
||||
Root string `json:"root"`
|
||||
SigningRoot string `json:"signing_root"`
|
||||
}
|
||||
|
||||
type SSZValue struct {
|
||||
Message json.RawMessage `json:"message"`
|
||||
Signature string `json:"signature"`// hex encoded '0x...'
|
||||
}
|
||||
|
||||
type TestCase struct {
|
||||
path string
|
||||
config string
|
||||
phase string
|
||||
typeName string
|
||||
caseId string
|
||||
}
|
||||
|
||||
func (tc *TestCase) MarshaledBytes() ([]byte, error) {
|
||||
fh, err := os.Open(path.Join(tc.path, "serialized.ssz_snappy"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fh.Close()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_, err = buf.ReadFrom(fh)
|
||||
return snappy.Decode(nil, buf.Bytes())
|
||||
}
|
||||
|
||||
func (tc *TestCase) Value() (*SSZValue, error) {
|
||||
fh, err := os.Open(path.Join(tc.path, "value.yaml"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fh.Close()
|
||||
d := json.NewDecoder(fh)
|
||||
v := &SSZValue{}
|
||||
err = d.Decode(v)
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (tc *TestCase) Roots() (*SSZRoots, error) {
|
||||
fh, err := os.Open(path.Join(tc.path, "roots.yaml"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fh.Close()
|
||||
d := json.NewDecoder(fh)
|
||||
r := &SSZRoots{}
|
||||
err = d.Decode(r)
|
||||
return r, err
|
||||
}
|
||||
|
||||
//rootBytes, err := hex.DecodeString(rootsYaml.Root[2:])
|
||||
//require.NoError(t, err)
|
||||
//require.DeepEqual(t, rootBytes, root[:], "Did not receive expected hash tree root")
|
||||
|
||||
type ExperimentalSSZ interface {
|
||||
XXUnmarshalSSZ(buf []byte) error
|
||||
XXMarshalSSZ() ([]byte, error)
|
||||
XXHashTreeRoot() ([32]byte, error)
|
||||
fssz.Unmarshaler
|
||||
fssz.Marshaler
|
||||
fssz.HashRoot
|
||||
}
|
||||
|
||||
type pbinit func() ExperimentalSSZ
|
||||
|
||||
var casesBeaconP2pV1 = map[string]pbinit{
|
||||
"BeaconState": func() ExperimentalSSZ { return &pbbeacon.BeaconState{} },
|
||||
"DepositMessage": func() ExperimentalSSZ { return &pbbeacon.DepositMessage{} },
|
||||
"Fork": func() ExperimentalSSZ { return &pbbeacon.Fork{} },
|
||||
"ForkData": func() ExperimentalSSZ { return &pbbeacon.ForkData{} },
|
||||
"HistoricalBatch": func() ExperimentalSSZ { return &pbbeacon.HistoricalBatch{} },
|
||||
"PendingAttestation": func() ExperimentalSSZ { return &pbbeacon.PendingAttestation{} },
|
||||
"SigningData": func() ExperimentalSSZ { return &pbbeacon.SigningData{} },
|
||||
}
|
||||
|
||||
var casesV1 map[string]pbinit = map[string]pbinit{
|
||||
"AggregateAndProof": func() ExperimentalSSZ { return &pbethv1.AggregateAttestationAndProof{} },
|
||||
"Attestation": func() ExperimentalSSZ { return &pbethv1.Attestation{} },
|
||||
"AttestationData": func() ExperimentalSSZ { return &pbethv1.AttestationData{} },
|
||||
"AttesterSlashing": func() ExperimentalSSZ { return &pbethv1.AttesterSlashing{} },
|
||||
"BeaconBlock": func() ExperimentalSSZ { return &pbethv1.BeaconBlock{} },
|
||||
"BeaconBlockBody": func() ExperimentalSSZ { return &pbethv1.BeaconBlockBody{} },
|
||||
"BeaconBlockHeader": func() ExperimentalSSZ { return &pbethv1.BeaconBlockHeader{} },
|
||||
// exists in proto/eth/v1, but fastssz methods are not genrated for it
|
||||
//"BeaconState": func() ExperimentalSSZ { return &pbethv1.BeaconState{} },
|
||||
"Checkpoint": func() ExperimentalSSZ { return &pbethv1.Checkpoint{} },
|
||||
"Deposit": func() ExperimentalSSZ { return &pbethv1.Deposit{} },
|
||||
"DepositData": func() ExperimentalSSZ { return &pbethv1.Deposit_Data{} },
|
||||
"Eth1Data": func() ExperimentalSSZ { return &pbethv1.Eth1Data{} },
|
||||
// Fork is defined in proto/eth/v1 package, but fastssz methods are not generated
|
||||
//"Fork": func() ExperimentalSSZ { return &pbethv1.Fork{} },
|
||||
"IndexedAttestation": func() ExperimentalSSZ { return &pbethv1.IndexedAttestation{} },
|
||||
// PendingAttestation is defined in proto/eth/v1 package, but fastssz methods are not generated
|
||||
//"PendingAttestation": func() ExperimentalSSZ { return &pbethv1.PendingAttestation{} },
|
||||
"ProposerSlashing": func() ExperimentalSSZ { return &pbethv1.ProposerSlashing{} },
|
||||
"SignedAggregateAndProof": func() ExperimentalSSZ { return &pbethv1.SignedAggregateAttestationAndProof{} },
|
||||
"SignedBeaconBlock": func() ExperimentalSSZ { return &pbethv1.SignedBeaconBlock{} },
|
||||
"SignedBeaconBlockHeader": func() ExperimentalSSZ { return &pbethv1.SignedBeaconBlockHeader{} },
|
||||
"SignedVoluntaryExit": func() ExperimentalSSZ { return &pbethv1.SignedVoluntaryExit{} },
|
||||
"Validator": func() ExperimentalSSZ { return &pbethv1.Validator{} },
|
||||
"VoluntaryExit": func() ExperimentalSSZ { return &pbethv1.VoluntaryExit{} },
|
||||
}
|
||||
|
||||
var casesV1Alpha1 map[string]pbinit = map[string]pbinit{
|
||||
"AggregateAndProof": func() ExperimentalSSZ { return &pbethv1alpha1.AggregateAttestationAndProof{} },
|
||||
"Attestation": func() ExperimentalSSZ { return &pbethv1alpha1.Attestation{} },
|
||||
"AttestationData": func() ExperimentalSSZ { return &pbethv1alpha1.AttestationData{} },
|
||||
"AttesterSlashing": func() ExperimentalSSZ { return &pbethv1alpha1.AttesterSlashing{} },
|
||||
"BeaconBlock": func() ExperimentalSSZ { return &pbethv1alpha1.BeaconBlock{} },
|
||||
"BeaconBlockBody": func() ExperimentalSSZ { return &pbethv1alpha1.BeaconBlockBody{} },
|
||||
"BeaconBlockHeader": func() ExperimentalSSZ { return &pbethv1alpha1.BeaconBlockHeader{} },
|
||||
"Checkpoint": func() ExperimentalSSZ { return &pbethv1alpha1.Checkpoint{} },
|
||||
"Deposit": func() ExperimentalSSZ { return &pbethv1alpha1.Deposit{} },
|
||||
"DepositData": func() ExperimentalSSZ { return &pbethv1alpha1.Deposit_Data{} },
|
||||
"Eth1Data": func() ExperimentalSSZ { return &pbethv1alpha1.Eth1Data{} },
|
||||
"IndexedAttestation": func() ExperimentalSSZ { return &pbethv1alpha1.IndexedAttestation{} },
|
||||
"ProposerSlashing": func() ExperimentalSSZ { return &pbethv1alpha1.ProposerSlashing{} },
|
||||
"SignedAggregateAndProof": func() ExperimentalSSZ { return &pbethv1alpha1.SignedAggregateAttestationAndProof{} },
|
||||
"SignedBeaconBlock": func() ExperimentalSSZ { return &pbethv1alpha1.SignedBeaconBlock{} },
|
||||
"SignedBeaconBlockHeader": func() ExperimentalSSZ { return &pbethv1alpha1.SignedBeaconBlockHeader{} },
|
||||
"SignedVoluntaryExit": func() ExperimentalSSZ { return &pbethv1alpha1.SignedVoluntaryExit{} },
|
||||
"Validator": func() ExperimentalSSZ { return &pbethv1alpha1.Validator{} },
|
||||
"VoluntaryExit": func() ExperimentalSSZ { return &pbethv1alpha1.VoluntaryExit{} },
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/sszgen"
|
||||
"github.com/prysmaticlabs/prysm/sszgen/backend"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var sourcePackage, output, typeNames string
|
||||
var generate = &cli.Command{
|
||||
Name: "generate",
|
||||
ArgsUsage: "<input package, eg github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1>",
|
||||
Aliases: []string{"gen"},
|
||||
Usage: "generate methodsets for a go struct type to support ssz ser/des",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "output",
|
||||
Value: "",
|
||||
Usage: "directory to write generated code (same as input by default)",
|
||||
Destination: &output,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "type-names",
|
||||
Value: "",
|
||||
Usage: "if specified, only generate methods for types specified in this comma-separated list",
|
||||
Destination: &typeNames,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
sourcePackage = c.Args().Get(0)
|
||||
if sourcePackage == "" {
|
||||
cli.ShowCommandHelp(c, "generate")
|
||||
return fmt.Errorf("error: mising required <input package> argument")
|
||||
}
|
||||
var err error
|
||||
index := sszgen.NewPackageIndex()
|
||||
packageName, err := index.GetPackageName(sourcePackage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rep := sszgen.NewRepresenter(index)
|
||||
|
||||
var specs []*sszgen.DeclarationRef
|
||||
if len(typeNames) > 0 {
|
||||
for _, n := range strings.Split(strings.TrimSpace(typeNames), ",") {
|
||||
specs = append(specs, &sszgen.DeclarationRef{Package: sourcePackage, Name: n})
|
||||
}
|
||||
} else {
|
||||
specs, err = index.DeclarationRefs(sourcePackage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(specs) == 0 {
|
||||
return fmt.Errorf("Could not find any codegen targets in source package %s", sourcePackage)
|
||||
}
|
||||
|
||||
if output == "" {
|
||||
output = "methodical.ssz.go"
|
||||
}
|
||||
outFh, err := os.Create(output)
|
||||
defer outFh.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
g := backend.NewGenerator(packageName, sourcePackage)
|
||||
for _, s := range specs {
|
||||
fmt.Printf("Generating methods for %s/%s\n", s.Package, s.Name)
|
||||
typeRep, err := rep.GetDeclaration(s.Package, s.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Generate(typeRep)
|
||||
}
|
||||
rbytes, err := g.Render()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(outFh, bytes.NewReader(rbytes))
|
||||
return err
|
||||
},
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/sszgen"
|
||||
"github.com/prysmaticlabs/prysm/sszgen/testutil"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var ir = &cli.Command{
|
||||
Name: "ir",
|
||||
ArgsUsage: "<input package, eg github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1>",
|
||||
Aliases: []string{"gen"},
|
||||
Usage: "generate intermediate representation for a go struct type. This data structure is used by the backend code generator. Outputting it to a source file an be useful for generating test cases and debugging.",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "output",
|
||||
Value: "",
|
||||
Usage: "file path to write generated code",
|
||||
Destination: &output,
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "type-names",
|
||||
Value: "",
|
||||
Usage: "if specified, only generate types specified in this comma-separated list",
|
||||
Destination: &typeNames,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
if c.NArg() > 0 {
|
||||
sourcePackage = c.Args().Get(0)
|
||||
}
|
||||
index := sszgen.NewPackageIndex()
|
||||
rep := sszgen.NewRepresenter(index)
|
||||
|
||||
var err error
|
||||
var specs []*sszgen.DeclarationRef
|
||||
if len(typeNames) > 0 {
|
||||
for _, n := range strings.Split(strings.TrimSpace(typeNames), ",") {
|
||||
specs = append(specs, &sszgen.DeclarationRef{Package: sourcePackage, Name: n})
|
||||
}
|
||||
} else {
|
||||
specs, err = index.DeclarationRefs(sourcePackage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(specs) == 0 {
|
||||
return fmt.Errorf("Could not find any codegen targets in source package %s", sourcePackage)
|
||||
}
|
||||
|
||||
outFh, err := os.Create(output)
|
||||
defer outFh.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
renderedTypes := make([]string, 0)
|
||||
for _, s := range specs {
|
||||
typeRep, err := rep.GetDeclaration(s.Package, s.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rendered, err := testutil.RenderIntermediate(typeRep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
renderedTypes = append(renderedTypes, rendered)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(outFh, strings.NewReader(strings.Join(renderedTypes, "\n")))
|
||||
return err
|
||||
},
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := &cli.App{
|
||||
Usage: "ssz support for prysm",
|
||||
Commands: []*cli.Command{benchmark, generate, ir},
|
||||
}
|
||||
|
||||
err := app.Run(os.Args)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"accounts.go",
|
||||
"backup.go",
|
||||
"delete.go",
|
||||
"list.go",
|
||||
"wallet_utils.go",
|
||||
@@ -15,7 +14,6 @@ go_library(
|
||||
"//cmd:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//io/prompt:go_default_library",
|
||||
"//runtime/tos:go_default_library",
|
||||
"//validator/accounts:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
@@ -31,27 +29,20 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"backup_test.go",
|
||||
"delete_test.go",
|
||||
],
|
||||
srcs = ["delete_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//validator/accounts:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
"//validator/accounts/wallet:go_default_library",
|
||||
"//validator/keymanager:go_default_library",
|
||||
"//validator/keymanager/derived:go_default_library",
|
||||
"//validator/keymanager/local:go_default_library",
|
||||
"//validator/testing:go_default_library",
|
||||
"@com_github_google_uuid//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",
|
||||
|
||||
@@ -111,16 +111,13 @@ var Commands = &cli.Command{
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return tos.VerifyTosAcceptedOrPrompt(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsBackup(cliCtx); err != nil {
|
||||
if err := accounts.BackupAccountsCli(cliCtx); err != nil {
|
||||
log.Fatalf("Could not backup accounts: %v", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -1,82 +0,0 @@
package accounts

import (
    "strings"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/cmd"
    "github.com/prysmaticlabs/prysm/cmd/validator/flags"
    "github.com/prysmaticlabs/prysm/io/prompt"
    "github.com/prysmaticlabs/prysm/validator/accounts"
    "github.com/prysmaticlabs/prysm/validator/accounts/userprompt"
    "github.com/prysmaticlabs/prysm/validator/client"
    "github.com/urfave/cli/v2"
)

const backupPromptText = "Enter the directory where your backup.zip file will be written to"

func accountsBackup(c *cli.Context) error {
    w, km, err := walletWithKeymanager(c)
    if err != nil {
        return err
    }
    dialOpts := client.ConstructDialOptions(
        c.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
        c.String(flags.CertFlag.Name),
        c.Uint(flags.GrpcRetriesFlag.Name),
        c.Duration(flags.GrpcRetryDelayFlag.Name),
    )
    grpcHeaders := strings.Split(c.String(flags.GrpcHeadersFlag.Name), ",")

    opts := []accounts.Option{
        accounts.WithWallet(w),
        accounts.WithKeymanager(km),
        accounts.WithGRPCDialOpts(dialOpts),
        accounts.WithBeaconRPCProvider(c.String(flags.BeaconRPCProviderFlag.Name)),
        accounts.WithGRPCHeaders(grpcHeaders),
    }

    // Get full set of public keys from the keymanager.
    publicKeys, err := km.FetchValidatingPublicKeys(c.Context)
    if err != nil {
        return errors.Wrap(err, "could not fetch validating public keys")
    }
    // Filter keys either from CLI flag or from interactive session.
    filteredPubKeys, err := accounts.FilterPublicKeysFromUserInput(
        c,
        flags.BackupPublicKeysFlag,
        publicKeys,
        userprompt.SelectAccountsBackupPromptText,
    )
    if err != nil {
        return errors.Wrap(err, "could not filter public keys for backup")
    }
    opts = append(opts, accounts.WithFilteredPubKeys(filteredPubKeys))

    // Input the directory where they wish to backup their accounts.
    backupsDir, err := userprompt.InputDirectory(c, backupPromptText, flags.BackupDirFlag)
    if err != nil {
        return errors.Wrap(err, "could not parse keys directory")
    }
    // Ask the user for their desired password for their backed up accounts.
    backupsPassword, err := prompt.InputPassword(
        c,
        flags.BackupPasswordFile,
        "Enter a new password for your backed up accounts",
        "Confirm new password",
        true,
        prompt.ValidatePasswordInput,
    )
    if err != nil {
        return errors.Wrap(err, "could not determine password for backed up accounts")
    }

    opts = append(opts, accounts.WithBackupsDir(backupsDir))
    opts = append(opts, accounts.WithBackupsPassword(backupsPassword))

    acc, err := accounts.NewCLIManager(opts...)
    if err != nil {
        return err
    }
    return acc.Backup(c.Context)
}
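
The deleted backup.go above is a clean illustration of Go's functional-options pattern: the command collects accounts.Option values (wallet, keymanager, backup directory, password, and so on) and hands them to accounts.NewCLIManager, which applies them and exposes a Backup method. A generic, self-contained sketch of that pattern follows, with names invented for illustration; Prysm's real constructor, as the deleted code shows, can also return an error.

    package main

    import "fmt"

    // Manager holds everything the backup flow needs once construction is done.
    type Manager struct {
        backupsDir      string
        backupsPassword string
    }

    // Option mutates a Manager during construction.
    type Option func(*Manager)

    // WithBackupsDir sets the directory the backup archive is written to.
    func WithBackupsDir(dir string) Option {
        return func(m *Manager) { m.backupsDir = dir }
    }

    // WithBackupsPassword sets the password protecting the backup archive.
    func WithBackupsPassword(pw string) Option {
        return func(m *Manager) { m.backupsPassword = pw }
    }

    // NewManager applies each option, in order, to a zero-valued Manager.
    func NewManager(opts ...Option) *Manager {
        m := &Manager{}
        for _, opt := range opts {
            opt(m)
        }
        return m
    }

    func main() {
        m := NewManager(WithBackupsDir("/tmp/backups"), WithBackupsPassword("correct horse"))
        fmt.Println("writing backup to", m.backupsDir)
    }
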
@@ -13,7 +13,7 @@ go_library(
        "//validator:__subpackages__",
    ],
    deps = [
        "//config/params:go_default_library",
        "//config/fieldparams:go_default_library",
        "//io/file:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],

@@ -7,7 +7,7 @@ import (
    "runtime"
    "time"

    "github.com/prysmaticlabs/prysm/config/params"
    field_params "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/io/file"
    "github.com/urfave/cli/v2"
)
@@ -327,36 +327,21 @@ var (
    // FeeRecipientConfigFileFlag defines the path or URL to a file with proposer config.
    FeeRecipientConfigFileFlag = &cli.StringFlag{
        Name: "fee-recipient-config-file",
        Usage: "DEPRECATED, please use proposer-settings-file",
        Usage: "Set path to a JSON file containing validator mappings to ETH addresses for receiving transaction fees when proposing blocks (i.e. --fee-recipient-config-file=/path/to/proposer.json). File format found in docs",
        Value: "",
    }
    // FeeRecipientConfigURLFlag defines the path or URL to a file with proposer config.
    FeeRecipientConfigURLFlag = &cli.StringFlag{
        Name: "fee-recipient-config-url",
        Usage: "DEPRECATED, please use proposer-settings-url",
        Value: "",
    }

    // ProposerSettingsFlag defines the path or URL to a file with proposer config.
    ProposerSettingsFlag = &cli.StringFlag{
        Name: "proposer-settings-file",
        Usage: "Set path to a YAML or JSON file containing validator settings used when proposing blocks such as (fee recipient and gas limit) (i.e. --proposer-settings-file=/path/to/proposer.json). File format found in docs",
        Value: "",
    }
    // ProposerSettingsURLFlag defines the path or URL to a file with proposer config.
    ProposerSettingsURLFlag = &cli.StringFlag{
        Name: "proposer-settings-url",
        Usage: "Set URL to a REST endpoint containing validator settings used when proposing blocks such as (fee recipient) (i.e. --proposer-settings-url=https://example.com/api/getConfig). File format found in docs",
        Usage: "Set URL to a REST endpoint containing validator mappings to ETH addresses for receiving transaction fees when proposing blocks (i.e. --fee-recipient-config-url=https://example.com/api/getConfig). File format found in docs",
        Value: "",
    }

    // SuggestedFeeRecipientFlag defines the address of the fee recipient.
    SuggestedFeeRecipientFlag = &cli.StringFlag{
        Name: "suggested-fee-recipient",
        Usage: "Sets ALL validators' mapping to a suggested an eth address to receive gas fees when proposing a block." +
            " note that this is only a suggestion when integrating with a Builder API, which may choose to specify a different fee recipient as payment for the blocks it builds." +
            " For additional setting overrides use the --" + ProposerSettingsFlag.Name + " or --" + ProposerSettingsURLFlag.Name + " Flags. ",
        Value: params.BeaconConfig().EthBurnAddressHex,
        Name: "suggested-fee-recipient",
        Usage: "Sets ALL validators' mapping to a suggested an eth address to receive gas fees when proposing a block. Overrides the --fee-recipient-config-file flag if set",
        Value: field_params.EthBurnAddressHex,
    }
)

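The hunk above deprecates --fee-recipient-config-file and --fee-recipient-config-url in favour of --proposer-settings-file and --proposer-settings-url, and re-points the --suggested-fee-recipient default at the burn-address constant now exported from config/fieldparams. A common way to honour both flags during a deprecation window is a small resolver that prefers the new flag and warns on the old one; the helper below is a hypothetical sketch, not Prysm's actual resolution logic.

    package validatorcli

    import (
        "log"

        "github.com/prysmaticlabs/prysm/cmd/validator/flags"
        "github.com/urfave/cli/v2"
    )

    // resolveProposerSettingsPath prefers the new proposer-settings flag and
    // falls back to the deprecated fee-recipient flag, nudging operators to
    // migrate. Illustrative only.
    func resolveProposerSettingsPath(cliCtx *cli.Context) string {
        if path := cliCtx.String(flags.ProposerSettingsFlag.Name); path != "" {
            return path
        }
        if path := cliCtx.String(flags.FeeRecipientConfigFileFlag.Name); path != "" {
            log.Printf("--%s is deprecated, please use --%s",
                flags.FeeRecipientConfigFileFlag.Name, flags.ProposerSettingsFlag.Name)
            return path
        }
        return ""
    }
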
@@ -78,8 +78,6 @@ var appFlags = []cli.Flag{
    flags.FeeRecipientConfigFileFlag,
    flags.FeeRecipientConfigURLFlag,
    flags.SuggestedFeeRecipientFlag,
    flags.ProposerSettingsURLFlag,
    flags.ProposerSettingsFlag,
    ////////////////////
    cmd.DisableMonitoringFlag,
    cmd.MonitoringHostFlag,

@@ -111,8 +111,6 @@ var appHelpFlagGroups = []flagGroup{
        flags.Web3SignerPublicValidatorKeysFlag,
        flags.FeeRecipientConfigFileFlag,
        flags.FeeRecipientConfigURLFlag,
        flags.ProposerSettingsFlag,
        flags.ProposerSettingsURLFlag,
        flags.SuggestedFeeRecipientFlag,
    },
},

@@ -122,12 +122,6 @@ func configureTestnet(ctx *cli.Context) error {
        if err := params.SetActive(params.RopstenConfig().Copy()); err != nil {
            return err
        }
        if err := ctx.Set(enableVecHTR.Names()[0], "true"); err != nil {
            log.WithError(err).Debug("error enabling vectorized HTR flag")
        }
        if err := ctx.Set(enableForkChoiceDoublyLinkedTree.Names()[0], "true"); err != nil {
            log.WithError(err).Debug("error enabling doubly linked tree forkchoice flag")
        }
        params.UseRopstenNetworkConfig()
    } else {
        log.Warn("Running on Ethereum Consensus Mainnet")

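In the testnet hunk above, Ropsten support switches the active chain config and then turns on the vectorized-HTR and doubly-linked-tree forkchoice feature flags by name through ctx.Set, logging failures only at debug level so a missing flag never aborts startup. The helper below sketches that best-effort toggle in isolation; the package name and logger prefix are invented for the example.

    package flagtoggle

    import (
        "github.com/sirupsen/logrus"
        "github.com/urfave/cli/v2"
    )

    var log = logrus.WithField("prefix", "example")

    // enableByDefault flips a boolean CLI flag to "true" on a best-effort
    // basis: if the flag is not registered on this command, the error is
    // logged at debug level rather than failing configuration.
    func enableByDefault(ctx *cli.Context, flag cli.Flag) {
        if err := ctx.Set(flag.Names()[0], "true"); err != nil {
            log.WithError(err).Debug("could not enable flag by default")
        }
    }
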
@@ -5,22 +5,23 @@ package field_params

const (
    Preset = "mainnet"
    BlockRootsLength = 8192 // SLOTS_PER_HISTORICAL_ROOT
    StateRootsLength = 8192 // SLOTS_PER_HISTORICAL_ROOT
    RandaoMixesLength = 65536 // EPOCHS_PER_HISTORICAL_VECTOR
    HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
    ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
    Eth1DataVotesLength = 2048 // SLOTS_PER_ETH1_VOTING_PERIOD
    PreviousEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    CurrentEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    SlashingsLength = 8192 // EPOCHS_PER_SLASHINGS_VECTOR
    SyncCommitteeLength = 512 // SYNC_COMMITTEE_SIZE
    RootLength = 32 // RootLength defines the byte length of a Merkle root.
    BLSSignatureLength = 96 // BLSSignatureLength defines the byte length of a BLSSignature.
    BLSPubkeyLength = 48 // BLSPubkeyLength defines the byte length of a BLSSignature.
    MaxTxsPerPayloadLength = 1048576 // MaxTxsPerPayloadLength defines the maximum number of transactions that can be included in a payload.
    MaxBytesPerTxLength = 1073741824 // MaxBytesPerTxLength defines the maximum number of bytes that can be included in a transaction.
    FeeRecipientLength = 20 // FeeRecipientLength defines the byte length of a fee recipient.
    LogsBloomLength = 256 // LogsBloomLength defines the byte length of a logs bloom.
    VersionLength = 4 // VersionLength defines the byte length of a fork version number.
    BlockRootsLength = 8192 // SLOTS_PER_HISTORICAL_ROOT
    StateRootsLength = 8192 // SLOTS_PER_HISTORICAL_ROOT
    RandaoMixesLength = 65536 // EPOCHS_PER_HISTORICAL_VECTOR
    HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
    ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
    Eth1DataVotesLength = 2048 // SLOTS_PER_ETH1_VOTING_PERIOD
    PreviousEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    CurrentEpochAttestationsLength = 4096 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    SlashingsLength = 8192 // EPOCHS_PER_SLASHINGS_VECTOR
    SyncCommitteeLength = 512 // SYNC_COMMITTEE_SIZE
    RootLength = 32 // RootLength defines the byte length of a Merkle root.
    BLSSignatureLength = 96 // BLSSignatureLength defines the byte length of a BLSSignature.
    BLSPubkeyLength = 48 // BLSPubkeyLength defines the byte length of a BLSSignature.
    MaxTxsPerPayloadLength = 1048576 // MaxTxsPerPayloadLength defines the maximum number of transactions that can be included in a payload.
    MaxBytesPerTxLength = 1073741824 // MaxBytesPerTxLength defines the maximum number of bytes that can be included in a transaction.
    FeeRecipientLength = 20 // FeeRecipientLength defines the byte length of a fee recipient.
    LogsBloomLength = 256 // LogsBloomLength defines the byte length of a logs bloom.
    VersionLength = 4 // VersionLength defines the byte length of a fork version number.
    EthBurnAddressHex = "0x0000000000000000000000000000000000000000" // EthBurnAddressHex defines the hex encoded address of the eth1.0 burn contract.
)

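With the burn address now exported as a compile-time constant from config/fieldparams (and, later in this diff, dropped from BeaconChainConfig), call sites move from params.BeaconConfig().EthBurnAddressHex to the package constant, as the flags file above already does. A small sketch of the updated usage; the isBurnAddress helper is invented for illustration.

    package main

    import (
        "fmt"

        field_params "github.com/prysmaticlabs/prysm/config/fieldparams"
    )

    // isBurnAddress reports whether a configured fee recipient is still the
    // default burn address, in which case any earned fees would be lost.
    func isBurnAddress(recipient string) bool {
        return recipient == field_params.EthBurnAddressHex
    }

    func main() {
        fmt.Println(isBurnAddress("0x0000000000000000000000000000000000000000")) // true
        fmt.Println(isBurnAddress("0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045")) // false
    }
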
@@ -5,22 +5,23 @@ package field_params

const (
    Preset = "minimal"
    BlockRootsLength = 64 // SLOTS_PER_HISTORICAL_ROOT
    StateRootsLength = 64 // SLOTS_PER_HISTORICAL_ROOT
    RandaoMixesLength = 64 // EPOCHS_PER_HISTORICAL_VECTOR
    HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
    ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
    Eth1DataVotesLength = 32 // SLOTS_PER_ETH1_VOTING_PERIOD
    PreviousEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    CurrentEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    SlashingsLength = 64 // EPOCHS_PER_SLASHINGS_VECTOR
    SyncCommitteeLength = 32 // SYNC_COMMITTEE_SIZE
    RootLength = 32 // RootLength defines the byte length of a Merkle root.
    BLSSignatureLength = 96 // BLSSignatureLength defines the byte length of a BLSSignature.
    BLSPubkeyLength = 48 // BLSPubkeyLength defines the byte length of a BLSSignature.
    MaxTxsPerPayloadLength = 1048576 // MaxTxsPerPayloadLength defines the maximum number of transactions that can be included in a payload.
    MaxBytesPerTxLength = 1073741824 // MaxBytesPerTxLength defines the maximum number of bytes that can be included in a transaction.
    FeeRecipientLength = 20 // FeeRecipientLength defines the byte length of a fee recipient.
    LogsBloomLength = 256 // LogsBloomLength defines the byte length of a logs bloom.
    VersionLength = 4 // VersionLength defines the byte length of a fork version number.
    BlockRootsLength = 64 // SLOTS_PER_HISTORICAL_ROOT
    StateRootsLength = 64 // SLOTS_PER_HISTORICAL_ROOT
    RandaoMixesLength = 64 // EPOCHS_PER_HISTORICAL_VECTOR
    HistoricalRootsLength = 16777216 // HISTORICAL_ROOTS_LIMIT
    ValidatorRegistryLimit = 1099511627776 // VALIDATOR_REGISTRY_LIMIT
    Eth1DataVotesLength = 32 // SLOTS_PER_ETH1_VOTING_PERIOD
    PreviousEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    CurrentEpochAttestationsLength = 1024 // MAX_ATTESTATIONS * SLOTS_PER_EPOCH
    SlashingsLength = 64 // EPOCHS_PER_SLASHINGS_VECTOR
    SyncCommitteeLength = 32 // SYNC_COMMITTEE_SIZE
    RootLength = 32 // RootLength defines the byte length of a Merkle root.
    BLSSignatureLength = 96 // BLSSignatureLength defines the byte length of a BLSSignature.
    BLSPubkeyLength = 48 // BLSPubkeyLength defines the byte length of a BLSSignature.
    MaxTxsPerPayloadLength = 1048576 // MaxTxsPerPayloadLength defines the maximum number of transactions that can be included in a payload.
    MaxBytesPerTxLength = 1073741824 // MaxBytesPerTxLength defines the maximum number of bytes that can be included in a transaction.
    FeeRecipientLength = 20 // FeeRecipientLength defines the byte length of a fee recipient.
    LogsBloomLength = 256 // LogsBloomLength defines the byte length of a logs bloom.
    VersionLength = 4 // VersionLength defines the byte length of a fork version number.
    EthBurnAddressHex = "0x0000000000000000000000000000000000000000" // EthBurnAddressHex defines the hex encoded address of the eth1.0 burn contract.
)

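The mainnet and minimal blocks above declare the same identifiers with different values, so only one of them can be part of any given build; selecting between them is the kind of thing Go build constraints handle, and Prysm drives the choice from its build configuration. The sketch below shows the general mechanism; the exact tag name and file layout are assumptions, not a statement of how the repository's build rules wire it up.

    //go:build minimal

    // A file guarded like this (with the inverse constraint on its mainnet
    // counterpart) lets `go build -tags minimal` swap in the minimal preset
    // without any runtime branching; every value stays a true compile-time
    // constant.
    package field_params

    const Preset = "minimal"
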
@@ -192,8 +192,6 @@ type BeaconChainConfig struct {
    TerminalBlockHashActivationEpoch types.Epoch `yaml:"TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH" spec:"true"` // TerminalBlockHashActivationEpoch of beacon chain.
    TerminalTotalDifficulty string `yaml:"TERMINAL_TOTAL_DIFFICULTY" spec:"true"` // TerminalTotalDifficulty is part of the experimental Bellatrix spec. This value is type is currently TBD.
    DefaultFeeRecipient common.Address // DefaultFeeRecipient where the transaction fee goes to.
    EthBurnAddressHex string // EthBurnAddressHex is the constant eth address written in hex format to burn fees in that network. the default is 0x0
    DefaultBuilderGasLimit uint64 // DefaultBuilderGasLimit is the default used to set the gaslimit for the Builder APIs, typically at around 30M wei.
}

// InitializeForkSchedule initializes the schedules forks baked into the config.

@@ -244,8 +244,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
    TerminalBlockHashActivationEpoch: 18446744073709551615,
    TerminalBlockHash: [32]byte{},
    TerminalTotalDifficulty: "115792089237316195423570985008687907853269984665640564039457584007913129638912",
    EthBurnAddressHex: "0x0000000000000000000000000000000000000000",
    DefaultBuilderGasLimit: uint64(30000000),
}

// MainnetTestConfig provides a version of the mainnet config that has a different name

Some files were not shown because too many files have changed in this diff.