Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Comparing v5.0.1...pass_justi: 38 commits
Commits: fd4460fd46, 5237cb60cb, ee9274a9bc, ef21d3adf8, b6ce6c2eba, b3caaa9acc, d6fb8c29c9, 3df7a1f067, 4c3dbae3c0, 68b78dd520, 2e2ef4a179, b61d17731e, 6d3c6a6331, f1615c4c88, 87b127365f, 5215ed03fd, 0453d18395, 0132c1b17d, d9d2ee75de, ddb321e0ce, 5735379963, 1d5a09c05d, 70e1b11aeb, e100fb0c08, 789c3f8078, 0b261cba5e, 7a9608ea20, f795e09ecf, e6a6365bdd, 4c66e4d060, daad29d0de, 9f67ad9496, 0ee0653a15, 4ff91bebf8, f85e027141, e09ae75c9f, cb80d5ad32, 24b029bbef
@@ -113,6 +113,13 @@ http_archive(
     url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz", # 2024-01-24
 )

+http_archive(
+    name = "aspect_bazel_lib",
+    sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
+    strip_prefix = "bazel-lib-2.5.0",
+    url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
+)
+
+load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
+
+aspect_bazel_lib_dependencies()
@@ -108,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
 	}

 	log.
-		WithField("block_slot", b.Block().Slot()).
-		WithField("state_slot", s.Slot()).
-		WithField("state_root", hexutil.Encode(sr[:])).
-		WithField("block_root", hexutil.Encode(br[:])).
+		WithField("blockSlot", b.Block().Slot()).
+		WithField("stateSlot", s.Slot()).
+		WithField("stateRoot", hexutil.Encode(sr[:])).
+		WithField("blockRoot", hexutil.Encode(br[:])).
 		Info("Downloaded checkpoint sync state and block.")
 	return &OriginData{
 		st: s,
@@ -310,8 +310,8 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
 	for _, failure := range errorJson.Failures {
 		w := request[failure.Index].Message
 		log.WithFields(log.Fields{
-			"validator_index":    w.ValidatorIndex,
-			"withdrawal_address": w.ToExecutionAddress,
+			"validatorIndex":    w.ValidatorIndex,
+			"withdrawalAddress": w.ToExecutionAddress,
 		}).Error(failure.Message)
 	}
 	return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	b := bytes.NewBuffer(nil)
 	if r.Body == nil {
 		log.WithFields(log.Fields{
-			"body-base64": "(nil value)",
-			"url":         r.URL.String(),
+			"bodyBase64": "(nil value)",
+			"url":        r.URL.String(),
 		}).Info("builder http request")
 		return nil
 	}
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	}
 	r.Body = io.NopCloser(b)
 	log.WithFields(log.Fields{
-		"body-base64": string(body),
-		"url":         r.URL.String(),
+		"bodyBase64": string(body),
+		"url":        r.URL.String(),
 	}).Info("builder http request")

 	return nil
@@ -13,8 +13,6 @@ var (
 	errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
 	// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
 	errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
-	// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
-	errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
 	// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
 	errBlockDoesNotExist = errors.New("could not find block in DB")
 	// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
@@ -63,7 +63,7 @@ func TestSaveHead_Different(t *testing.T) {
 	wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
 	newRoot, err := newHeadBlock.HashTreeRoot()
 	require.NoError(t, err)
-	state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
+	state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -238,7 +238,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
 	wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
 	newRoot, err := newHeadBlock.HashTreeRoot()
 	require.NoError(t, err)
-	state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
+	state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
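These two test hunks replace raw `Slot()-1` arithmetic with `slots.PrevSlot`. Slots are unsigned, so subtracting from slot 0 would wrap around rather than fail; a saturating predecessor avoids that. A minimal sketch of the behavior relied on here (`prevSlot` below is illustrative, not Prysm's exact implementation):

package main

import "fmt"

// prevSlot returns the slot before the given one, saturating at zero:
// with unsigned arithmetic, 0-1 would otherwise wrap to 2^64-1.
func prevSlot(slot uint64) uint64 {
	if slot > 0 {
		return slot - 1
	}
	return 0
}

func main() {
	fmt.Println(prevSlot(5)) // 4
	fmt.Println(prevSlot(0)) // 0
	var genesis uint64
	fmt.Println(genesis - 1) // wraps: 18446744073709551615
}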
@@ -163,7 +163,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
 	parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
 	require.NoError(t, err)
 	require.Equal(t, p, bytesutil.ToBytes32(parentHash))
-	require.Equal(t, td, totalDifficulty.String())
+	require.Equal(t, td, totalDifficulty.Hex())

 	_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
 	require.ErrorContains(t, "could not get pow block: block not found", err)
@@ -307,16 +307,16 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
 		return errors.Wrap(err, "could not update proposer index cache")
 	}
-	go func() {
+	go func(ep primitives.Epoch) {
 		// Use a custom deadline here, since this method runs asynchronously.
 		// We ignore the parent method's context and instead create a new one
 		// with a custom deadline, therefore using the background context instead.
 		slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
 		defer cancel()
-		if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
+		if err := helpers.UpdateCommitteeCache(slotCtx, st, ep+1); err != nil {
 			log.WithError(err).Warn("Could not update committee cache")
 		}
-	}()
+	}(e)
 	// The latest block header is from the previous epoch
 	r, err := st.LatestBlockHeader().HashTreeRoot()
 	if err != nil {
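The fix above stops the goroutine from capturing the mutable epoch variable `e` from the enclosing scope and instead passes it as an argument, snapshotting the value at spawn time. A minimal sketch of the same hazard in its classic loop-variable form (pre-Go-1.22 semantics, where the loop variable is shared across iterations; names below are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(2)
		// Captured by reference: before Go 1.22 every iteration shares one
		// `i`, so these goroutines may all observe its final value.
		go func() {
			defer wg.Done()
			fmt.Println("captured:", i)
		}()
		// Passed as an argument: the value is snapshotted at spawn time,
		// which is what the updateEpochBoundaryCaches fix does with `e`.
		go func(n int) {
			defer wg.Done()
			fmt.Println("param:", n)
		}(i)
	}
	wg.Wait()
}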
@@ -1531,6 +1531,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 // 12 and recover. Notice that it takes two epochs to fully recover, and we stay
 // optimistic for the whole time.
 func TestStore_NoViableHead_Liveness(t *testing.T) {
+	t.Skip("Requires #13664 to be fixed")
 	params.SetupTestConfigCleanup(t)
 	config := params.BeaconConfig()
 	config.SlotsPerEpoch = 6
@@ -2114,7 +2115,7 @@ func TestMissingIndices(t *testing.T) {
 	for _, c := range cases {
 		bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
 		t.Run(c.name, func(t *testing.T) {
-			require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
+			require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
 			missing, err := missingIndices(bs, c.root, c.expected)
 			if c.err != nil {
 				require.ErrorIs(t, err, c.err)
@@ -32,6 +32,9 @@ import (
 // This defines how many epochs since finality the run time will begin to save hot state on to the DB.
 var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)

+// This defines how many epochs since finality the run time will begin to expand our respective cache sizes.
+var epochsSinceFinalityExpandCache = primitives.Epoch(4)
+
 // BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
 type BlockReceiver interface {
 	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
@@ -188,6 +191,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		return err
 	}

+	// We apply the same heuristic to some of our more important caches.
+	if err := s.handleCaches(); err != nil {
+		return err
+	}
+
 	// Reports on block and fork choice metrics.
 	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
 	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
@@ -361,6 +369,27 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
 	return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
 }

+func (s *Service) handleCaches() error {
+	currentEpoch := slots.ToEpoch(s.CurrentSlot())
+	// Prevent `sinceFinality` going underflow.
+	var sinceFinality primitives.Epoch
+	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
+	if finalized == nil {
+		return errNilFinalizedInStore
+	}
+	if currentEpoch > finalized.Epoch {
+		sinceFinality = currentEpoch - finalized.Epoch
+	}
+
+	if sinceFinality >= epochsSinceFinalityExpandCache {
+		helpers.ExpandCommitteeCache()
+		return nil
+	}
+
+	helpers.CompressCommitteeCache()
+	return nil
+}
+
 // This performs the state transition function and returns the poststate or an
 // error if the block fails to verify the consensus rules
 func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
@@ -308,6 +308,29 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
 	assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
 }

+func TestHandleCaches_EnablingLargeSize(t *testing.T) {
+	hook := logTest.NewGlobal()
+	s, _ := minimalTestService(t)
+	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
+	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+
+	require.NoError(t, s.handleCaches())
+	assert.LogsContain(t, hook, "Expanding committee cache size")
+}
+
+func TestHandleCaches_DisablingLargeSize(t *testing.T) {
+	hook := logTest.NewGlobal()
+	s, _ := minimalTestService(t)
+
+	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
+	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+	require.NoError(t, s.handleCaches())
+	s.genesisTime = time.Now()
+
+	require.NoError(t, s.handleCaches())
+	assert.LogsContain(t, hook, "Reducing committee cache size")
+}
+
 func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
 	service, tr := minimalTestService(t)
 	pool := tr.blsPool
@@ -199,6 +199,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 // Start a blockchain service's main event loop.
 func (s *Service) Start() {
 	saved := s.cfg.FinalizedStateAtStartUp
+	defer s.removeStartupState()

 	if saved != nil && !saved.IsNil() {
 		if err := s.StartFromSavedState(saved); err != nil {
@@ -271,13 +272,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	}
 	spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)

-	justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
-	if err != nil {
-		return errors.Wrap(err, "could not get justified checkpoint")
-	}
-	if justified == nil {
-		return errNilJustifiedCheckpoint
-	}
 	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
 	if err != nil {
 		return errors.Wrap(err, "could not get finalized checkpoint")
@@ -289,8 +283,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
-	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
-		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
+	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
+		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
 		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
 	}
 	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
@@ -418,7 +412,7 @@ func (s *Service) startFromExecutionChain() error {
 			log.Error("event data is not type *statefeed.ChainStartedData")
 			return
 		}
-		log.WithField("starttime", data.StartTime).Debug("Received chain start event")
+		log.WithField("startTime", data.StartTime).Debug("Received chain start event")
 		s.onExecutionChainStart(s.ctx, data.StartTime)
 		return
 	}
@@ -550,6 +544,10 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
 	return s.cfg.BeaconDB.HasBlock(ctx, root)
 }

+func (s *Service) removeStartupState() {
+	s.cfg.FinalizedStateAtStartUp = nil
+}
+
 func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
 	currentTime := prysmTime.Now()
 	if currentTime.After(genesisTime) {
beacon-chain/cache/committee.go (vendored), 34 changed lines
@@ -17,12 +17,16 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/container/slice"
 	mathutil "github.com/prysmaticlabs/prysm/v5/math"
+	log "github.com/sirupsen/logrus"
 )

 const (
 	// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
 	// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
-	maxCommitteesCacheSize = int(32)
+	maxCommitteesCacheSize = int(4)
+	// expandedCommitteeCacheSize defines the expanded size of the committee cache in the event we
+	// do not have finality to deal with long forks better.
+	expandedCommitteeCacheSize = int(32)
 )

 var (
@@ -43,6 +47,7 @@ type CommitteeCache struct {
 	CommitteeCache *lru.Cache
 	lock           sync.RWMutex
 	inProgress     map[string]bool
+	size           int
 }

 // committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
@@ -67,6 +72,33 @@ func (c *CommitteeCache) Clear() {
 	defer c.lock.Unlock()
 	c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
 	c.inProgress = make(map[string]bool)
+	c.size = maxCommitteesCacheSize
 }

+// ExpandCommitteeCache expands the size of the committee cache.
+func (c *CommitteeCache) ExpandCommitteeCache() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.size == expandedCommitteeCacheSize {
+		return
+	}
+	c.CommitteeCache.Resize(expandedCommitteeCacheSize)
+	c.size = expandedCommitteeCacheSize
+	log.Warnf("Expanding committee cache size from %d to %d", maxCommitteesCacheSize, expandedCommitteeCacheSize)
+}
+
+// CompressCommitteeCache compresses the size of the committee cache.
+func (c *CommitteeCache) CompressCommitteeCache() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.size == maxCommitteesCacheSize {
+		return
+	}
+	c.CommitteeCache.Resize(maxCommitteesCacheSize)
+	c.size = maxCommitteesCacheSize
+	log.Warnf("Reducing committee cache size from %d to %d", expandedCommitteeCacheSize, maxCommitteesCacheSize)
+}
+
 // Committee fetches the shuffled indices by slot and committee index. Every list of indices
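Both new methods funnel into the LRU cache's `Resize`. Assuming the hashicorp/golang-lru package that Prysm wraps, `Resize` changes capacity in place and returns how many entries were evicted, so growing during long non-finality is free and shrinking drops the oldest entries. A small sketch:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	c, err := lru.New(4) // steady-state size, like maxCommitteesCacheSize
	if err != nil {
		panic(err)
	}
	for i := 0; i < 4; i++ {
		c.Add(i, i)
	}
	c.Resize(32)           // grow while finality is stalled; nothing evicted
	evicted := c.Resize(4) // shrink once finality resumes; oldest entries go
	fmt.Println("evicted:", evicted, "len:", c.Len())
}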
beacon-chain/cache/committee_disabled.go (vendored), 8 changed lines
@@ -74,3 +74,11 @@ func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
 func (c *FakeCommitteeCache) Clear() {
 	return
 }
+
+func (c *FakeCommitteeCache) ExpandCommitteeCache() {
+	return
+}
+
+func (c *FakeCommitteeCache) CompressCommitteeCache() {
+	return
+}
@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
 	defer span.End()
 	if d == nil {
 		log.WithFields(logrus.Fields{
-			"block":        blockNum,
-			"deposit":      d,
-			"index":        index,
-			"deposit root": hex.EncodeToString(depositRoot[:]),
+			"block":       blockNum,
+			"deposit":     d,
+			"index":       index,
+			"depositRoot": hex.EncodeToString(depositRoot[:]),
 		}).Warn("Ignoring nil deposit insertion")
 		return errors.New("nil deposit inserted into the cache")
 	}
@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
 	}
 	if d == nil {
 		log.WithFields(logrus.Fields{
-			"block":        blockNum,
-			"deposit":      d,
-			"index":        index,
-			"deposit root": hex.EncodeToString(depositRoot[:]),
+			"block":       blockNum,
+			"deposit":     d,
+			"index":       index,
+			"depositRoot": hex.EncodeToString(depositRoot[:]),
 		}).Warn("Ignoring nil deposit insertion")
 		return errors.New("nil deposit inserted into the cache")
 	}
@@ -99,7 +99,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
 // VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
 // from the above method by not using fork data from the state and instead retrieving it
 // via the respective epoch.
-func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock) error {
+func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
 	currentEpoch := slots.ToEpoch(blk.Block().Slot())
 	fork, err := forks.Fork(currentEpoch)
 	if err != nil {
@@ -115,7 +115,9 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
 	}
 	proposerPubKey := proposer.PublicKey
 	sig := blk.Signature()
-	return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, blk.Block().HashTreeRoot)
+	return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
+		return blkRoot, nil
+	})
 }

 // BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
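The signature change lets the caller compute the block's hash tree root once and hand it down, so verification no longer re-Merkleizes the block. A minimal sketch of the pattern (the function names are stand-ins, not Prysm's API):

package main

import "fmt"

// expensiveRoot stands in for HashTreeRoot-style Merkleization.
func expensiveRoot() ([32]byte, error) {
	fmt.Println("merkleizing...")
	return [32]byte{1}, nil
}

// verify mimics a VerifyBlockSigningRoot-style API that takes a root provider.
func verify(rootFunc func() ([32]byte, error)) error {
	root, err := rootFunc()
	if err != nil {
		return err
	}
	_ = root // the signature check would consume root here
	return nil
}

func main() {
	// Before: hand the hasher itself in, re-Merkleizing on every verification.
	_ = verify(expensiveRoot)

	// After: compute once at the call site, wrap the cached value in a closure.
	root, _ := expensiveRoot()
	_ = verify(func() ([32]byte, error) { return root, nil })
}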
@@ -79,11 +79,13 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
 	}
 	domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
 	assert.NoError(t, err)
+	blkRoot, err := altairBlk.Block.HashTreeRoot()
+	assert.NoError(t, err)
 	rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
 	assert.NoError(t, err)
 	sig := keys[0].Sign(rt[:]).Marshal()
 	altairBlk.Signature = sig
 	wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
 	require.NoError(t, err)
-	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
+	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
 }
@@ -391,6 +391,16 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
 	return nil
 }

+// ExpandCommitteeCache resizes the cache to a higher limit.
+func ExpandCommitteeCache() {
+	committeeCache.ExpandCommitteeCache()
+}
+
+// CompressCommitteeCache resizes the cache to a lower limit.
+func CompressCommitteeCache() {
+	committeeCache.CompressCommitteeCache()
+}
+
 // ClearCache clears the beacon committee cache and sync committee cache.
 func ClearCache() {
 	committeeCache.Clear()
@@ -96,6 +96,7 @@ go_test(
 	"//testing/benchmark:go_default_library",
 	"//testing/require:go_default_library",
 	"//testing/util:go_default_library",
+	"//time/slots:go_default_library",
 	"@com_github_google_gofuzz//:go_default_library",
 	"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
 	"@org_golang_google_protobuf//proto:go_default_library",
@@ -21,6 +21,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
+	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )

 func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -48,7 +49,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))

 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -135,7 +136,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))

 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -23,6 +23,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
+	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )

 func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -50,7 +51,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))

 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -124,7 +125,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
 		DepositRoot: bytesutil.PadTo([]byte{2}, 32),
 		BlockHash:   make([]byte, 32),
 	}
-	require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(params.BeaconConfig().SlotsPerEpoch)))
 	e := beaconState.Eth1Data()
 	e.DepositCount = 100
 	require.NoError(t, beaconState.SetEth1Data(e))
@@ -137,7 +138,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
 	epoch := time.CurrentEpoch(beaconState)
 	randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
 	require.NoError(t, err)
-	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
+	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))

 	nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
 	require.NoError(t, err)
@@ -5,6 +5,7 @@ go_library(
 	srcs = [
 		"blob.go",
 		"ephemeral.go",
+		"log.go",
 		"metrics.go",
 		"pruner.go",
 	],
@@ -16,12 +16,15 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/io/file"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/logging"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 	"github.com/spf13/afero"
 )

 var (
-	errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
+	errIndexOutOfBounds    = errors.New("blob index in file name >= MaxBlobsPerBlock")
+	errEmptyBlobWritten    = errors.New("zero bytes written to disk when saving blob sidecar")
+	errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
+	errNoBasePath          = errors.New("BlobStorage base path not specified in init")
 )

 const (
@@ -34,14 +37,26 @@ const (
 // BlobStorageOption is a functional option for configuring a BlobStorage.
 type BlobStorageOption func(*BlobStorage) error

+// WithBasePath is a required option that sets the base path of blob storage.
+func WithBasePath(base string) BlobStorageOption {
+	return func(b *BlobStorage) error {
+		b.base = base
+		return nil
+	}
+}
+
 // WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
 func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
 	return func(b *BlobStorage) error {
-		pruner, err := newBlobPruner(b.fs, e)
-		if err != nil {
-			return err
-		}
-		b.pruner = pruner
+		b.retentionEpochs = e
 		return nil
 	}
 }
+
+// WithSaveFsync is an option that causes Save to call fsync before renaming part files for improved durability.
+func WithSaveFsync(fsync bool) BlobStorageOption {
+	return func(b *BlobStorage) error {
+		b.fsync = fsync
+		return nil
+	}
+}
@@ -49,30 +64,36 @@ func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
 // NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
 // attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
 // initialized once per beacon node.
-func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error) {
-	base = path.Clean(base)
-	if err := file.MkdirAll(base); err != nil {
-		return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
-	}
-	fs := afero.NewBasePathFs(afero.NewOsFs(), base)
-	b := &BlobStorage{
-		fs: fs,
-	}
+func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
+	b := &BlobStorage{}
 	for _, o := range opts {
 		if err := o(b); err != nil {
-			return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
+			return nil, errors.Wrap(err, "failed to create blob storage")
 		}
 	}
-	if b.pruner == nil {
-		log.Warn("Initializing blob filesystem storage with pruning disabled")
+	if b.base == "" {
+		return nil, errNoBasePath
 	}
+	b.base = path.Clean(b.base)
+	if err := file.MkdirAll(b.base); err != nil {
+		return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
+	}
+	b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
+	pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
+	if err != nil {
+		return nil, err
+	}
+	b.pruner = pruner
 	return b, nil
 }

 // BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
 type BlobStorage struct {
-	fs     afero.Fs
-	pruner *blobPruner
+	base            string
+	retentionEpochs primitives.Epoch
+	fsync           bool
+	fs              afero.Fs
+	pruner          *blobPruner
 }

 // WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
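NewBlobStorage drops its positional `base` argument and becomes fully option-driven, validating required fields only after all options have run. A self-contained sketch of this functional-options pattern (the types and option names below are illustrative, not Prysm's):

package main

import (
	"errors"
	"fmt"
)

type Store struct {
	base  string
	fsync bool
}

type Option func(*Store) error

func WithBase(base string) Option {
	return func(s *Store) error { s.base = base; return nil }
}

func WithFsync(on bool) Option {
	return func(s *Store) error { s.fsync = on; return nil }
}

// New applies every option first, then validates, so option order never matters.
func New(opts ...Option) (*Store, error) {
	s := &Store{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	if s.base == "" {
		return nil, errors.New("base path is required")
	}
	return s, nil
}

func main() {
	if _, err := New(WithFsync(true)); err != nil {
		fmt.Println(err) // the required option is missing, caught at construction
	}
}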
@@ -83,7 +104,7 @@ func (bs *BlobStorage) WarmCache() {
 	}
 	go func() {
 		if err := bs.pruner.prune(0); err != nil {
-			log.WithError(err).Error("Error encountered while warming up blob pruner cache.")
+			log.WithError(err).Error("Error encountered while warming up blob pruner cache")
 		}
 	}()
 }
@@ -98,7 +119,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 		return err
 	}
 	if exists {
-		log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
+		log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
 		return nil
 	}
 	if bs.pruner != nil {
@@ -111,11 +132,14 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 	sidecarData, err := sidecar.MarshalSSZ()
 	if err != nil {
 		return errors.Wrap(err, "failed to serialize sidecar data")
+	} else if len(sidecarData) == 0 {
+		return errSidecarEmptySSZData
 	}

 	if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
 		return err
 	}
-	partPath := fname.partPath()
+	partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

 	partialMoved := false
 	// Ensure the partial file is deleted.
@@ -126,9 +150,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 		// It's expected to error if the save is successful.
 		err = bs.fs.Remove(partPath)
 		if err == nil {
-			log.WithFields(log.Fields{
+			log.WithFields(logrus.Fields{
 				"partPath": partPath,
-			}).Debugf("removed partial file")
+			}).Debugf("Removed partial file")
 		}
 	}()
@@ -138,7 +162,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 		return errors.Wrap(err, "failed to create partial file")
 	}

-	_, err = partialFile.Write(sidecarData)
+	n, err := partialFile.Write(sidecarData)
 	if err != nil {
 		closeErr := partialFile.Close()
 		if closeErr != nil {
@@ -146,11 +170,24 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 		}
 		return errors.Wrap(err, "failed to write to partial file")
 	}
-	err = partialFile.Close()
-	if err != nil {
+	if bs.fsync {
+		if err := partialFile.Sync(); err != nil {
+			return err
+		}
+	}
+
+	if err := partialFile.Close(); err != nil {
 		return err
 	}

+	if n != len(sidecarData) {
+		return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
+	}
+
+	if n == 0 {
+		return errEmptyBlobWritten
+	}
+
 	// Atomically rename the partial file to its final name.
 	err = bs.fs.Rename(partPath, sszPath)
 	if err != nil {
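The save path now writes to a uniquely named part file, optionally fsyncs, verifies the byte count, and renames into place, so readers never observe a half-written blob. A standard-library sketch of the same pattern (assuming POSIX rename atomicity; the helper below is illustrative, not Prysm's code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite writes data to a unique part file, optionally fsyncs, then
// renames into place. The final path is only ever fully written or absent.
func atomicWrite(dir, name string, data []byte, fsync bool) error {
	// A per-writer suffix (pid here, pointer formatting in the diff) keeps
	// concurrent savers from clobbering each other's part files.
	part := filepath.Join(dir, fmt.Sprintf("%s-%d.part", name, os.Getpid()))
	f, err := os.Create(part)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err != nil {
		f.Close()
		return err
	}
	if fsync {
		if err := f.Sync(); err != nil { // flush to stable storage before rename
			f.Close()
			return err
		}
	}
	if err := f.Close(); err != nil {
		return err
	}
	if n != len(data) {
		return fmt.Errorf("short write: %d of %d bytes", n, len(data))
	}
	return os.Rename(part, filepath.Join(dir, name)) // atomic on POSIX filesystems
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)
	if err := atomicWrite(dir, "blob.ssz", []byte("data"), true); err != nil {
		panic(err)
	}
}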
@@ -257,16 +294,12 @@ func (p blobNamer) dir() string {
 	return rootString(p.root)
 }

-func (p blobNamer) fname(ext string) string {
-	return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
-}
-
-func (p blobNamer) partPath() string {
-	return p.fname(partExt)
+func (p blobNamer) partPath(entropy string) string {
+	return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
 }

 func (p blobNamer) path() string {
-	return p.fname(sszExt)
+	return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
 }

 func rootString(root [32]byte) string {
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"os"
 	"path"
+	"sync"
 	"testing"
 	"time"
@@ -101,6 +102,30 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
 		_, err = b.Get(blob.BlockRoot(), blob.Index)
 		require.ErrorIs(t, err, os.ErrNotExist)
 	})
+
+	t.Run("race conditions", func(t *testing.T) {
+		// There was a bug where saving the same blob in multiple go routines would cause a partial blob
+		// to be empty. This test ensures that several routines can safely save the same blob at the
+		// same time. This isn't ideal behavior from the caller, but should be handled safely anyway.
+		// See https://github.com/prysmaticlabs/prysm/pull/13648
+		b, err := NewBlobStorage(WithBasePath(t.TempDir()))
+		require.NoError(t, err)
+		blob := testSidecars[0]
+
+		var wg sync.WaitGroup
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				require.NoError(t, b.Save(blob))
+			}()
+		}
+
+		wg.Wait()
+		res, err := b.Get(blob.BlockRoot(), blob.Index)
+		require.NoError(t, err)
+		require.DeepSSZEqual(t, blob, res)
+	})
 }

 // pollUntil polls a condition function until it returns true or a timeout is reached.
@@ -243,6 +268,8 @@ func BenchmarkPruning(b *testing.B) {
 }

 func TestNewBlobStorage(t *testing.T) {
-	_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
+	_, err := NewBlobStorage()
+	require.ErrorIs(t, err, errNoBasePath)
+	_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
 	require.NoError(t, err)
 }
@@ -37,7 +37,7 @@ type BlobMocker struct {

 // CreateFakeIndices creates empty blob sidecar files at the expected path for the given
 // root and indices to influence the result of Indices().
-func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
+func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error {
 	for i := range indices {
 		n := blobNamer{root: root, index: indices[i]}
 		if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {
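Switching `indices []uint64` to `indices ...uint64` stays source-compatible for slice callers via the `...` splat while letting tests list indices inline. A small sketch:

package main

import "fmt"

// createFake mirrors the CreateFakeIndices change: a variadic index list.
func createFake(indices ...uint64) {
	fmt.Println(len(indices), indices)
}

func main() {
	createFake(0, 2, 5) // elements listed inline
	present := []uint64{1, 3}
	createFake(present...) // an existing slice, splatted as in the updated test
	createFake() // zero indices is also legal
}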
beacon-chain/db/filesystem/log.go (new file), 5 lines
@@ -0,0 +1,5 @@
+package filesystem
+
+import "github.com/sirupsen/logrus"
+
+var log = logrus.WithField("prefix", "filesystem")
@@ -16,7 +16,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 	"github.com/spf13/afero"
 )
@@ -87,7 +87,7 @@ func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
 		}()
 	} else {
 		defer func() {
-			log.WithFields(log.Fields{
+			log.WithFields(logrus.Fields{
 				"upToEpoch":    slots.ToEpoch(pruneBefore),
 				"duration":     time.Since(start).String(),
 				"filesRemoved": totalPruned,
@@ -100,37 +100,24 @@ func StoreDatafilePath(dirPath string) string {
 }

 var Buckets = [][]byte{
-	attestationsBucket,
 	blocksBucket,
 	stateBucket,
-	proposerSlashingsBucket,
-	attesterSlashingsBucket,
-	voluntaryExitsBucket,
 	chainMetadataBucket,
 	checkpointBucket,
 	powchainBucket,
 	stateSummaryBucket,
 	stateValidatorsBucket,
 	// Indices buckets.
-	attestationHeadBlockRootBucket,
-	attestationSourceRootIndicesBucket,
-	attestationSourceEpochIndicesBucket,
-	attestationTargetRootIndicesBucket,
-	attestationTargetEpochIndicesBucket,
 	blockSlotIndicesBucket,
 	stateSlotIndicesBucket,
 	blockParentRootIndicesBucket,
 	finalizedBlockRootsIndexBucket,
 	blockRootValidatorHashesBucket,
-	// State management service bucket.
-	newStateServiceCompatibleBucket,
 	// Migrations
 	migrationsBucket,

 	feeRecipientBucket,
 	registrationBucket,
-
-	blobsBucket,
 }

 // KVStoreOption is a functional option that modifies a kv.Store.
@@ -7,20 +7,15 @@ package kv
 // it easy to scan for keys that have a certain shard number as a prefix and return those
 // corresponding attestations.
 var (
-	attestationsBucket      = []byte("attestations")
-	blobsBucket             = []byte("blobs")
-	blocksBucket            = []byte("blocks")
-	stateBucket             = []byte("state")
-	stateSummaryBucket      = []byte("state-summary")
-	proposerSlashingsBucket = []byte("proposer-slashings")
-	attesterSlashingsBucket = []byte("attester-slashings")
-	voluntaryExitsBucket    = []byte("voluntary-exits")
-	chainMetadataBucket     = []byte("chain-metadata")
-	checkpointBucket        = []byte("check-point")
-	powchainBucket          = []byte("powchain")
-	stateValidatorsBucket   = []byte("state-validators")
-	feeRecipientBucket      = []byte("fee-recipient")
-	registrationBucket      = []byte("registration")
+	blocksBucket          = []byte("blocks")
+	stateBucket           = []byte("state")
+	stateSummaryBucket    = []byte("state-summary")
+	chainMetadataBucket   = []byte("chain-metadata")
+	checkpointBucket      = []byte("check-point")
+	powchainBucket        = []byte("powchain")
+	stateValidatorsBucket = []byte("state-validators")
+	feeRecipientBucket    = []byte("fee-recipient")
+	registrationBucket    = []byte("registration")

 	// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
 	slotsHasObjectBucket = []byte("slots-has-objects")
@@ -28,16 +23,11 @@ var (
 	archivedRootBucket = []byte("archived-index-root")

 	// Key indices buckets.
-	blockParentRootIndicesBucket        = []byte("block-parent-root-indices")
-	blockSlotIndicesBucket              = []byte("block-slot-indices")
-	stateSlotIndicesBucket              = []byte("state-slot-indices")
-	attestationHeadBlockRootBucket      = []byte("attestation-head-block-root-indices")
-	attestationSourceRootIndicesBucket  = []byte("attestation-source-root-indices")
-	attestationSourceEpochIndicesBucket = []byte("attestation-source-epoch-indices")
-	attestationTargetRootIndicesBucket  = []byte("attestation-target-root-indices")
-	attestationTargetEpochIndicesBucket = []byte("attestation-target-epoch-indices")
-	finalizedBlockRootsIndexBucket      = []byte("finalized-block-roots-index")
-	blockRootValidatorHashesBucket      = []byte("block-root-validator-hashes")
+	blockParentRootIndicesBucket   = []byte("block-parent-root-indices")
+	blockSlotIndicesBucket         = []byte("block-slot-indices")
+	stateSlotIndicesBucket         = []byte("state-slot-indices")
+	finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
+	blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")

 	// Specific item keys.
 	headBlockRootKey = []byte("head-root")
@@ -69,9 +59,6 @@ var (
 	// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
 	savedStateSlotsKey = []byte("saved-state-slots")

-	// New state management service compatibility bucket.
-	newStateServiceCompatibleBucket = []byte("new-state-compatible")
-
 	// Migrations
 	migrationsBucket = []byte("migrations")
 )
@@ -22,7 +22,14 @@ func Restore(cliCtx *cli.Context) error {
 	targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)

 	restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
-	if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
+	restoreFile := path.Join(restoreDir, kv.DatabaseFileName)
+
+	dbExists, err := file.Exists(restoreFile, file.Regular)
+	if err != nil {
+		return errors.Wrapf(err, "could not check if database exists in %s", restoreFile)
+	}
+
+	if dbExists {
 		resp, err := prompt.ValidatePrompt(
 			os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
 		)
@@ -48,7 +48,6 @@ go_test(
 	"//consensus-types/primitives:go_default_library",
 	"//encoding/bytesutil:go_default_library",
 	"//proto/prysm/v1alpha1:go_default_library",
-	"//testing/assert:go_default_library",
 	"//testing/require:go_default_library",
 	"//time/slots:go_default_library",
 	"@com_github_prysmaticlabs_fastssz//:go_default_library",
@@ -23,6 +23,10 @@ import (
 const (
 	attestationRecordKeySize = 32 // Bytes.
 	rootSize                 = 32 // Bytes.
+
+	// For database performance reasons, database read/write operations
+	// are chunked into batches of maximum `batchSize` elements.
+	batchSize = 10_000
 )

 // LastEpochWrittenForValidators given a list of validator indices returns the latest
@@ -259,14 +263,23 @@ func (s *Store) AttestationRecordForValidator(
 // then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
 func (s *Store) SaveAttestationRecordsForValidators(
 	ctx context.Context,
-	attestations []*slashertypes.IndexedAttestationWrapper,
+	attWrappers []*slashertypes.IndexedAttestationWrapper,
 ) error {
 	_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
 	defer span.End()
-	encodedTargetEpoch := make([][]byte, len(attestations))
-	encodedRecords := make([][]byte, len(attestations))

-	for i, attestation := range attestations {
+	attWrappersCount := len(attWrappers)
+
+	// If no attestations are provided, skip.
+	if attWrappersCount == 0 {
+		return nil
+	}
+
+	// Build encoded target epochs and encoded records
+	encodedTargetEpoch := make([][]byte, attWrappersCount)
+	encodedRecords := make([][]byte, attWrappersCount)
+
+	for i, attestation := range attWrappers {
 		encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)

 		value, err := encodeAttestationRecord(attestation)
@@ -278,60 +291,115 @@ func (s *Store) SaveAttestationRecordsForValidators(
 		encodedRecords[i] = value
 	}

-	return s.db.Update(func(tx *bolt.Tx) error {
-		attRecordsBkt := tx.Bucket(attestationRecordsBucket)
-		dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
+	// Save attestation records in the database by batch.
+	for stop := attWrappersCount; stop >= 0; stop -= batchSize {
+		start := max(0, stop-batchSize)

-		for i := len(attestations) - 1; i >= 0; i-- {
-			attestation := attestations[i]
+		attWrappersBatch := attWrappers[start:stop]
+		encodedTargetEpochBatch := encodedTargetEpoch[start:stop]
+		encodedRecordsBatch := encodedRecords[start:stop]

-			if err := attRecordsBkt.Put(attestation.DataRoot[:], encodedRecords[i]); err != nil {
-				return err
-			}
-
-			for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
-				encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
-
-				key := append(encodedTargetEpoch[i], encIdx...)
-				if err := dataRootsBkt.Put(key, attestation.DataRoot[:]); err != nil {
-					return err
-				}
-			}
+		// Perform basic check.
+		if len(encodedTargetEpochBatch) != len(encodedRecordsBatch) {
+			return fmt.Errorf(
+				"cannot save attestation records, got %d target epochs and %d records",
+				len(encodedTargetEpochBatch), len(encodedRecordsBatch),
+			)
 		}

-		return nil
-	})
+		currentBatchSize := len(encodedTargetEpochBatch)
+
+		// Save attestation records in the database.
+		if err := s.db.Update(func(tx *bolt.Tx) error {
+			attRecordsBkt := tx.Bucket(attestationRecordsBucket)
+			dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
+
+			for i := currentBatchSize - 1; i >= 0; i-- {
+				attWrapper := attWrappersBatch[i]
+				dataRoot := attWrapper.DataRoot
+
+				encodedTargetEpoch := encodedTargetEpochBatch[i]
+				encodedRecord := encodedRecordsBatch[i]
+
+				if err := attRecordsBkt.Put(dataRoot[:], encodedRecord); err != nil {
+					return err
+				}
+
+				for _, validatorIndex := range attWrapper.IndexedAttestation.AttestingIndices {
+					encodedIndex := encodeValidatorIndex(primitives.ValidatorIndex(validatorIndex))
+
+					key := append(encodedTargetEpoch, encodedIndex...)
+					if err := dataRootsBkt.Put(key, dataRoot[:]); err != nil {
+						return err
+					}
+				}
+			}
+
+			return nil
+		}); err != nil {
+			return errors.Wrap(err, "failed to save attestation records")
+		}
+	}

 	return nil
 }

 // LoadSlasherChunks given a chunk kind and a disk keys, retrieves chunks for a validator
 // min or max span used by slasher from our database.
 func (s *Store) LoadSlasherChunks(
-	ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
+	ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte,
 ) ([][]uint16, []bool, error) {
 	_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
 	defer span.End()
-	chunks := make([][]uint16, 0)
-	var exists []bool
-	err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(slasherChunksBucket)
-		for _, diskKey := range diskKeys {
-			key := append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), diskKey...)
-			chunkBytes := bkt.Get(key)
-			if chunkBytes == nil {
-				chunks = append(chunks, []uint16{})
-				exists = append(exists, false)
-				continue
-			}
-			chunk, err := decodeSlasherChunk(chunkBytes)
-			if err != nil {
-				return err
-			}
-			chunks = append(chunks, chunk)
-			exists = append(exists, true)
+
+	keysCount := len(chunkKeys)
+
+	chunks := make([][]uint16, 0, keysCount)
+	exists := make([]bool, 0, keysCount)
+	encodedKeys := make([][]byte, 0, keysCount)
+
+	// Encode kind.
+	encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
+
+	// Encode keys.
+	for _, chunkKey := range chunkKeys {
+		encodedKey := append(encodedKind, chunkKey...)
+		encodedKeys = append(encodedKeys, encodedKey)
+	}
+
+	// Read chunks from the database by batch.
+	for start := 0; start < keysCount; start += batchSize {
+		stop := min(start+batchSize, len(encodedKeys))
+		encodedKeysBatch := encodedKeys[start:stop]
+
+		if err := s.db.View(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket(slasherChunksBucket)
+
+			for _, encodedKey := range encodedKeysBatch {
+				chunkBytes := bkt.Get(encodedKey)
+
+				if chunkBytes == nil {
+					chunks = append(chunks, []uint16{})
+					exists = append(exists, false)
+					continue
+				}
+
+				chunk, err := decodeSlasherChunk(chunkBytes)
+				if err != nil {
+					return err
+				}
+
+				chunks = append(chunks, chunk)
+				exists = append(exists, true)
+			}
+
+			return nil
+		}); err != nil {
+			return nil, nil, err
 		}
-		return nil
-	})
-	return chunks, exists, err
+	}
+
+	return chunks, exists, nil
 }

 // SaveSlasherChunks given a chunk kind, list of disk keys, and list of chunks,
@@ -341,25 +409,60 @@ func (s *Store) SaveSlasherChunks(
 ) error {
 	_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
 	defer span.End()
-	encodedKeys := make([][]byte, len(chunkKeys))
-	encodedChunks := make([][]byte, len(chunkKeys))
-	for i := 0; i < len(chunkKeys); i++ {
-		encodedKeys[i] = append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), chunkKeys[i]...)
-		encodedChunk, err := encodeSlasherChunk(chunks[i])
+
+	// Ensure we have the same number of keys and chunks.
+	if len(chunkKeys) != len(chunks) {
+		return fmt.Errorf(
+			"cannot save slasher chunks, got %d keys and %d chunks",
+			len(chunkKeys), len(chunks),
+		)
+	}
+
+	chunksCount := len(chunks)
+
+	// Encode kind.
+	encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
+
+	// Encode keys and chunks.
+	encodedKeys := make([][]byte, chunksCount)
+	encodedChunks := make([][]byte, chunksCount)
+
+	for i := 0; i < chunksCount; i++ {
+		chunkKey, chunk := chunkKeys[i], chunks[i]
+		encodedKey := append(encodedKind, chunkKey...)
+
+		encodedChunk, err := encodeSlasherChunk(chunk)
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "failed to encode slasher chunk for key %v", chunkKey)
 		}
+
+		encodedKeys[i] = encodedKey
 		encodedChunks[i] = encodedChunk
 	}
-	return s.db.Update(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(slasherChunksBucket)
-		for i := 0; i < len(chunkKeys); i++ {
-			if err := bkt.Put(encodedKeys[i], encodedChunks[i]); err != nil {
-				return err
+
+	// Save chunks in the database by batch.
+	for start := 0; start < chunksCount; start += batchSize {
+		stop := min(start+batchSize, len(encodedKeys))
+		encodedKeysBatch := encodedKeys[start:stop]
+		encodedChunksBatch := encodedChunks[start:stop]
+		batchSize := len(encodedKeysBatch)
+
+		if err := s.db.Update(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket(slasherChunksBucket)
+
+			for i := 0; i < batchSize; i++ {
+				if err := bkt.Put(encodedKeysBatch[i], encodedChunksBatch[i]); err != nil {
+					return err
+				}
 			}
+
+			return nil
+		}); err != nil {
+			return errors.Wrap(err, "failed to save slasher chunks")
 		}
-		return nil
-	})
+	}
+
+	return nil
 }

 // CheckDoubleBlockProposals takes in a list of proposals and for each,
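Both the save and load paths now split work into transactions of at most `batchSize` items instead of one transaction over everything, keeping each bolt transaction's lock hold time and memory bounded. A minimal sketch of the chunked-write pattern (bucket name and key layout are illustrative; `min` is the Go 1.21 builtin):

package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

const batchSize = 10_000

// saveBatched writes key/value pairs in bounded transactions: one bolt
// Update per batch rather than one giant transaction for all pairs.
func saveBatched(db *bolt.DB, keys, vals [][]byte) error {
	if len(keys) != len(vals) {
		return fmt.Errorf("got %d keys and %d values", len(keys), len(vals))
	}
	for start := 0; start < len(keys); start += batchSize {
		stop := min(start+batchSize, len(keys))
		if err := db.Update(func(tx *bolt.Tx) error {
			bkt, err := tx.CreateBucketIfNotExists([]byte("chunks"))
			if err != nil {
				return err
			}
			for i := start; i < stop; i++ {
				if err := bkt.Put(keys[i], vals[i]); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}

func main() {}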
@@ -14,33 +14,56 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 )

 func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
-	ctx := context.Background()
-	beaconDB := setupDB(t)
-	valIdx := primitives.ValidatorIndex(1)
-	target := primitives.Epoch(5)
-	source := primitives.Epoch(4)
-	attRecord, err := beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
-	require.NoError(t, err)
-	require.Equal(t, true, attRecord == nil)
+	const attestationsCount = 11_000

-	sr := [32]byte{1}
-	err = beaconDB.SaveAttestationRecordsForValidators(
-		ctx,
-		[]*slashertypes.IndexedAttestationWrapper{
-			createAttestationWrapper(source, target, []uint64{uint64(valIdx)}, sr[:]),
-		},
-	)
+	// Create context.
+	ctx := context.Background()
+
+	// Create database.
+	beaconDB := setupDB(t)
+
+	// Define the validator index.
+	validatorIndex := primitives.ValidatorIndex(1)
+
+	// Defines attestations to save and retrieve.
+	attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
+	for i := 0; i < attestationsCount; i++ {
+		var dataRoot [32]byte
+		binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))
+
+		attWrapper := createAttestationWrapper(
+			primitives.Epoch(i),
+			primitives.Epoch(i+1),
+			[]uint64{uint64(validatorIndex)},
+			dataRoot[:],
+		)
+
+		attWrappers[i] = attWrapper
+	}
+
+	// Check on a sample of validators that no attestation records are available.
+	for i := 0; i < attestationsCount; i += 100 {
+		attRecord, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
+		require.NoError(t, err)
+		require.Equal(t, true, attRecord == nil)
+	}
+
+	// Save the attestation records to the database.
+	err := beaconDB.SaveAttestationRecordsForValidators(ctx, attWrappers)
 	require.NoError(t, err)
-	attRecord, err = beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
-	require.NoError(t, err)
-	assert.DeepEqual(t, target, attRecord.IndexedAttestation.Data.Target.Epoch)
-	assert.DeepEqual(t, source, attRecord.IndexedAttestation.Data.Source.Epoch)
-	assert.DeepEqual(t, sr, attRecord.DataRoot)
+
+	// Check on a sample of validators that attestation records are available.
+	for i := 0; i < attestationsCount; i += 100 {
+		expected := attWrappers[i]
+		actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
+		require.NoError(t, err)
+
+		require.DeepEqual(t, expected.IndexedAttestation.Data.Source.Epoch, actual.IndexedAttestation.Data.Source.Epoch)
+	}
 }

 func TestStore_LastEpochWrittenForValidators(t *testing.T) {
@@ -138,61 +161,113 @@ func TestStore_CheckAttesterDoubleVotes(t *testing.T) {
}

func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
	// Define test parameters.
	const (
		elemsPerChunk = 16
		totalChunks   = 11_000
	)

	// Create context.
	ctx := context.Background()

	// Create database.
	beaconDB := setupDB(t)
	elemsPerChunk := 16
	totalChunks := 64
	chunkKeys := make([][]byte, totalChunks)
	chunks := make([][]uint16, totalChunks)

	// Create min chunk keys and chunks.
	minChunkKeys := make([][]byte, totalChunks)
	minChunks := make([][]uint16, totalChunks)

	for i := 0; i < totalChunks; i++ {
		// Create chunk key.
		chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i))
		minChunkKeys[i] = chunkKey

		// Create chunk.
		chunk := make([]uint16, elemsPerChunk)

		for j := 0; j < len(chunk); j++ {
			chunk[j] = uint16(0)
			chunk[j] = uint16(i + j)
		}
		chunks[i] = chunk
		chunkKeys[i] = ssz.MarshalUint64(make([]byte, 0), uint64(i))

		minChunks[i] = chunk
	}

	// We save chunks for min spans.
	err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, chunkKeys, chunks)
	// Create max chunk keys and chunks.
	maxChunkKeys := make([][]byte, totalChunks)
	maxChunks := make([][]uint16, totalChunks)

	for i := 0; i < totalChunks; i++ {
		// Create chunk key.
		chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i+1))
		maxChunkKeys[i] = chunkKey

		// Create chunk.
		chunk := make([]uint16, elemsPerChunk)

		for j := 0; j < len(chunk); j++ {
			chunk[j] = uint16(i + j + 1)
		}

		maxChunks[i] = chunk
	}

	// Save chunks for min spans.
	err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, minChunkKeys, minChunks)
	require.NoError(t, err)

	// We expect no chunks to be stored for max spans.
	// Expect no chunks to be stored for max spans.
	_, chunksExist, err := beaconDB.LoadSlasherChunks(
		ctx, slashertypes.MaxSpan, chunkKeys,
		ctx, slashertypes.MaxSpan, minChunkKeys,
	)
	require.NoError(t, err)
	require.Equal(t, len(chunks), len(chunksExist))
	require.Equal(t, len(minChunks), len(chunksExist))

	for _, exists := range chunksExist {
		require.Equal(t, false, exists)
	}

	// We check we saved the right chunks.
	// Check the right chunks are saved.
	retrievedChunks, chunksExist, err := beaconDB.LoadSlasherChunks(
		ctx, slashertypes.MinSpan, chunkKeys,
		ctx, slashertypes.MinSpan, minChunkKeys,
	)
	require.NoError(t, err)
	require.Equal(t, len(chunks), len(retrievedChunks))
	require.Equal(t, len(chunks), len(chunksExist))
	require.Equal(t, len(minChunks), len(retrievedChunks))
	require.Equal(t, len(minChunks), len(chunksExist))

	for i, exists := range chunksExist {
		require.Equal(t, true, exists)
		require.DeepEqual(t, chunks[i], retrievedChunks[i])
		require.DeepEqual(t, minChunks[i], retrievedChunks[i])
	}

	// We save chunks for max spans.
	err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, chunkKeys, chunks)
	// Save chunks for max spans.
	err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, maxChunkKeys, maxChunks)
	require.NoError(t, err)

	// We check we saved the right chunks.
	// Check right chunks are saved.
	retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
		ctx, slashertypes.MaxSpan, chunkKeys,
		ctx, slashertypes.MaxSpan, maxChunkKeys,
	)
	require.NoError(t, err)
	require.Equal(t, len(chunks), len(retrievedChunks))
	require.Equal(t, len(chunks), len(chunksExist))

	require.Equal(t, len(maxChunks), len(retrievedChunks))
	require.Equal(t, len(maxChunks), len(chunksExist))

	for i, exists := range chunksExist {
		require.Equal(t, true, exists)
		require.DeepEqual(t, chunks[i], retrievedChunks[i])
		require.DeepEqual(t, maxChunks[i], retrievedChunks[i])
	}

	// Check the right chunks are still saved for min span.
	retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
		ctx, slashertypes.MinSpan, minChunkKeys,
	)
	require.NoError(t, err)
	require.Equal(t, len(minChunks), len(retrievedChunks))
	require.Equal(t, len(minChunks), len(chunksExist))

	for i, exists := range chunksExist {
		require.Equal(t, true, exists)
		require.DeepEqual(t, minChunks[i], retrievedChunks[i])
	}
}
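A note on the keys used in this test: ssz.MarshalUint64 appends the chunk index as eight little-endian bytes, which is what keeps min-span and max-span chunks under distinct, order-preserving keys. A minimal, self-contained sketch of that encoding (assuming only SSZ's little-endian uint64 layout; the store may add its own prefixes):

package main

import (
	"encoding/binary"
	"fmt"
)

// chunkKey mirrors ssz.MarshalUint64(make([]byte, 0), k): the chunk index
// encoded as 8 little-endian bytes.
func chunkKey(k uint64) []byte {
	key := make([]byte, 8)
	binary.LittleEndian.PutUint64(key, k)
	return key
}

func main() {
	fmt.Printf("%x\n", chunkKey(3)) // 0300000000000000
	fmt.Printf("%x\n", chunkKey(4)) // 0400000000000000
}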

@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
	}

	log.WithFields(logrus.Fields{
		"ChainStartTime": chainStartTime,
		"chainStartTime": chainStartTime,
	}).Info("Minimum number of validators reached for beacon-chain to start")
	s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.ChainStarted,

@@ -583,6 +583,9 @@ func (s *Service) run(done <-chan struct{}) {
	s.runError = nil

	s.initPOWService()
	// Do not keep storing the finalized state as it is
	// no longer of use.
	s.removeStartupState()

	chainstartTicker := time.NewTicker(logPeriod)
	defer chainstartTicker.Stop()

@@ -636,7 +639,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
	}

	fields := logrus.Fields{
		"Additional validators needed": valNeeded,
		"additionalValidatorsNeeded": valNeeded,
	}
	if secondsLeft > 0 {
		fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second

@@ -910,3 +913,7 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
	s.depositTrie = newDepositTrie
	return nil
}

func (s *Service) removeStartupState() {
	s.cfg.finalizedStateAtStartup = nil
}

@@ -82,6 +82,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
		return
	}

	// Return early if we are checking before 10 seconds into the slot
	secs, err := slots.SecondsSinceSlotStart(head.slot, f.store.genesisTime, uint64(time.Now().Unix()))
	if err != nil {
		log.WithError(err).Error("could not check current slot")
		return true
	}
	if secs < ProcessAttestationsThreshold {
		return true
	}
	// Only orphan a block if the parent LMD vote is strong
	if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
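
The new guard above keys the reorg decision off wall-clock progress within the slot. A hedged sketch of the underlying arithmetic, assuming mainnet's 12-second slots; secondsIntoSlot is an illustrative stand-in, not Prysm's slots.SecondsSinceSlotStart:

package main

import (
	"errors"
	"fmt"
)

const secondsPerSlot = 12 // mainnet value, assumed here

// secondsIntoSlot returns how far `now` is into `slot` for a chain whose
// genesis happened at `genesis` (all values in unix seconds).
func secondsIntoSlot(slot, genesis, now uint64) (uint64, error) {
	start := genesis + slot*secondsPerSlot
	if now < start {
		return 0, errors.New("slot has not started yet")
	}
	return now - start, nil
}

func main() {
	// Slot 2 starts 24s after genesis, so 25s after genesis we are 1s in,
	// under the 10-second threshold named in the comment above, and the
	// function would return true (too early to decide on an override).
	secs, _ := secondsIntoSlot(2, 0, 25)
	fmt.Println(secs) // 1
}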

@@ -85,11 +85,19 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
		require.Equal(t, false, f.ShouldOverrideFCU())
		f.store.headNode.parent = saved
	})
	t.Run("parent is weak", func(t *testing.T) {
	t.Run("parent is weak early call", func(t *testing.T) {
		saved := f.store.headNode.parent.weight
		f.store.headNode.parent.weight = 0
		require.Equal(t, true, f.ShouldOverrideFCU())
		f.store.headNode.parent.weight = saved
	})
	t.Run("parent is weak late call", func(t *testing.T) {
		saved := f.store.headNode.parent.weight
		driftGenesisTime(f, 2, 11)
		f.store.headNode.parent.weight = 0
		require.Equal(t, false, f.ShouldOverrideFCU())
		f.store.headNode.parent.weight = saved
		driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
	})
	t.Run("Head is strong", func(t *testing.T) {
		f.store.headNode.weight = f.store.committeeWeight

@@ -11,7 +11,6 @@ import (

// MuxConfig contains configuration that should be used when registering the beacon node in the gateway.
type MuxConfig struct {
	Handler      gateway.MuxHandler
	EthPbMux     *gateway.PbMux
	V1AlphaPbMux *gateway.PbMux
}

@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
	return logrus.Fields{
		"ValidatorIndex": idx,
		"Slot":           data.Slot,
		"Source":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
		"Target":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
		"Head":           fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
		"validatorIndex": idx,
		"slot":           data.Slot,
		"source":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
		"target":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
		"head":           fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
	}
}
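
The lowercased keys also explain why the expected strings in the tests that follow are reordered, not just recased: logrus's text formatter sorts field keys alphabetically by default, so renaming Slot to slot moves it relative to the injected prefix field. A minimal sketch of that behavior:

package main

import log "github.com/sirupsen/logrus"

func main() {
	// With logrus's default text formatter, fields print sorted by key:
	// prefix=monitor slot=1 validatorIndex=2
	log.WithFields(log.Fields{
		"validatorIndex": 2,
		"slot":           1,
		"prefix":         "monitor",
	}).Info("Attestation included")
}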

@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
				aggregatedPerf.totalCorrectTarget++
			}
		}
		logFields["CorrectHead"] = latestPerf.timelyHead
		logFields["CorrectSource"] = latestPerf.timelySource
		logFields["CorrectTarget"] = latestPerf.timelyTarget
		logFields["InclusionSlot"] = latestPerf.inclusionSlot
		logFields["NewBalance"] = balance
		logFields["BalanceChange"] = balanceChg
		logFields["correctHead"] = latestPerf.timelyHead
		logFields["correctSource"] = latestPerf.timelySource
		logFields["correctTarget"] = latestPerf.timelyTarget
		logFields["inclusionSlot"] = latestPerf.inclusionSlot
		logFields["newBalance"] = balance
		logFields["balanceChange"] = balanceChg

		s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
		s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf

@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
	root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
	st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if st == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
		log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping unaggregated attestation due to state not found in cache")
		return
	}

@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
	defer s.Unlock()
	if s.trackedIndex(att.AggregatorIndex) {
		log.WithFields(logrus.Fields{
			"AggregatorIndex": att.AggregatorIndex,
			"Slot":            att.Aggregate.Data.Slot,
			"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
			"aggregatorIndex": att.AggregatorIndex,
			"slot":            att.Aggregate.Data.Slot,
			"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.BeaconBlockRoot)),
			"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
			"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.Source.Root)),
			"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
			"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.Target.Root)),
		}).Info("Processed attestation aggregation")
		aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]

@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
	copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
	st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if st == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
		log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping aggregated attestation due to state not found in cache")
		return
	}

@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
		AggregationBits: bitfield.Bitlist{0b11, 0b1},
	}
	s.processIncludedAttestation(context.Background(), state, att)
	wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
	wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
	wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
	wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
	require.LogsContain(t, hook, wanted1)
	require.LogsContain(t, hook, wanted2)
}

@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
	}
	require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
	s.processUnaggregatedAttestation(context.Background(), att)
	wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
	wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
	wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
	wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
	require.LogsContain(t, hook, wanted1)
	require.LogsContain(t, hook, wanted2)
}

@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
		},
	}
	s.processAggregatedAttestation(ctx, att)
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
	require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
	logrus.SetLevel(logrus.InfoLevel)
}

@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {

	require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
	s.processAggregatedAttestation(ctx, att)
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
	require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
	require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
	require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
	require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
}

func TestProcessAttestations(t *testing.T) {

@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
	wrappedBlock, err := blocks.NewBeaconBlock(block)
	require.NoError(t, err)
	s.processAttestations(ctx, state, wrappedBlock)
	wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
	wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
	wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
	wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
	require.LogsContain(t, hook, wanted1)
	require.LogsContain(t, hook, wanted2)

@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
	}
	st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if st == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
		log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping block collection due to state not found in cache")
		return
	}

@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b

		parentRoot := blk.ParentRoot()
		log.WithFields(logrus.Fields{
			"ProposerIndex": blk.ProposerIndex(),
			"Slot":          blk.Slot(),
			"Version":       blk.Version(),
			"ParentRoot":    fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
			"BlockRoot":     fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
			"NewBalance":    balance,
			"BalanceChange": balanceChg,
			"proposerIndex": blk.ProposerIndex(),
			"slot":          blk.Slot(),
			"version":       blk.Version(),
			"parentRoot":    fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
			"blockRoot":     fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
			"newBalance":    balance,
			"balanceChange": balanceChg,
		}).Info("Proposed beacon block was included")
	}
}

@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
		idx := slashing.Header_1.Header.ProposerIndex
		if s.trackedIndex(idx) {
			log.WithFields(logrus.Fields{
				"ProposerIndex": idx,
				"Slot":          blk.Slot(),
				"SlashingSlot":  slashing.Header_1.Header.Slot,
				"BodyRoot1":     fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
				"BodyRoot2":     fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
				"proposerIndex": idx,
				"slot":          blk.Slot(),
				"slashingSlot":  slashing.Header_1.Header.Slot,
				"bodyRoot1":     fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
				"bodyRoot2":     fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
			}).Info("Proposer slashing was included")
		}
	}

@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
		for _, idx := range blocks.SlashableAttesterIndices(slashing) {
			if s.trackedIndex(primitives.ValidatorIndex(idx)) {
				log.WithFields(logrus.Fields{
					"AttesterIndex":      idx,
					"BlockInclusionSlot": blk.Slot(),
					"AttestationSlot1":   slashing.Attestation_1.Data.Slot,
					"BeaconBlockRoot1":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
					"SourceEpoch1":       slashing.Attestation_1.Data.Source.Epoch,
					"TargetEpoch1":       slashing.Attestation_1.Data.Target.Epoch,
					"AttestationSlot2":   slashing.Attestation_2.Data.Slot,
					"BeaconBlockRoot2":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
					"SourceEpoch2":       slashing.Attestation_2.Data.Source.Epoch,
					"TargetEpoch2":       slashing.Attestation_2.Data.Target.Epoch,
					"attesterIndex":      idx,
					"blockInclusionSlot": blk.Slot(),
					"attestationSlot1":   slashing.Attestation_1.Data.Slot,
					"beaconBlockRoot1":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
					"sourceEpoch1":       slashing.Attestation_1.Data.Source.Epoch,
					"targetEpoch1":       slashing.Attestation_1.Data.Target.Epoch,
					"attestationSlot2":   slashing.Attestation_2.Data.Slot,
					"beaconBlockRoot2":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
					"sourceEpoch2":       slashing.Attestation_2.Data.Source.Epoch,
					"targetEpoch2":       slashing.Attestation_2.Data.Target.Epoch,
				}).Info("Attester slashing was included")
			}
		}

@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
		percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)

		log.WithFields(logrus.Fields{
			"ValidatorIndex":           idx,
			"StartEpoch":               p.startEpoch,
			"StartBalance":             p.startBalance,
			"TotalRequested":           p.totalRequestedCount,
			"AttestationInclusion":     fmt.Sprintf("%.2f%%", percentAtt*100),
			"BalanceChangePct":         fmt.Sprintf("%.2f%%", percentBal*100),
			"CorrectlyVotedSourcePct":  fmt.Sprintf("%.2f%%", percentCorrectSource*100),
			"CorrectlyVotedTargetPct":  fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
			"CorrectlyVotedHeadPct":    fmt.Sprintf("%.2f%%", percentCorrectHead*100),
			"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
			"TotalProposedBlocks":      p.totalProposedCount,
			"TotalAggregations":        p.totalAggregations,
			"TotalSyncContributions":   p.totalSyncCommitteeContributions,
			"validatorIndex":           idx,
			"startEpoch":               p.startEpoch,
			"startBalance":             p.startBalance,
			"totalRequested":           p.totalRequestedCount,
			"attestationInclusion":     fmt.Sprintf("%.2f%%", percentAtt*100),
			"balanceChangePct":         fmt.Sprintf("%.2f%%", percentBal*100),
			"correctlyVotedSourcePct":  fmt.Sprintf("%.2f%%", percentCorrectSource*100),
			"correctlyVotedTargetPct":  fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
			"correctlyVotedHeadPct":    fmt.Sprintf("%.2f%%", percentCorrectHead*100),
			"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
			"totalProposedBlocks":      p.totalProposedCount,
			"totalAggregations":        p.totalAggregations,
			"totalSyncContributions":   p.totalSyncCommitteeContributions,
		}).Info("Aggregated performance since launch")
	}
}

@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
				},
			},
		},
		wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
		wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
	},
	{
		name: "Proposer slashing an untracked index",

@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
				},
			},
		},
		wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
			"BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
		wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
			"beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
	},
	{
		name: "Attester slashing untracked index",

@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
			StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
			Body:      &ethpb.BeaconBlockBody{},
		},
		wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
		wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
	},
	{
		name: "Block proposed by untracked validator",

@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
	root, err := b.GetBlock().HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
	wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
	wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
	wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
	wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
	wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
	wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
	wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
	wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
	wrapped, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	s.processBlock(ctx, wrapped)

@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
	}

	s.logAggregatedPerformance()
	wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
		" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
		"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
		"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
		"ValidatorIndex=1 prefix=monitor"
	wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
		" averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
		"correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
		"startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
		"validatorIndex=1"
	require.LogsContain(t, hook, wanted)
}

@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
		idx := exit.Exit.ValidatorIndex
		if s.trackedIndex(idx) {
			log.WithFields(logrus.Fields{
				"ValidatorIndex": idx,
				"Slot":           blk.Slot(),
				"validatorIndex": idx,
				"slot":           blk.Slot(),
			}).Info("Voluntary exit was included")
		}
	}

@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
	defer s.RUnlock()
	if s.trackedIndex(idx) {
		log.WithFields(logrus.Fields{
			"ValidatorIndex": idx,
			"validatorIndex": idx,
		}).Info("Voluntary exit was processed")
	}
}

@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
	wb, err := blocks.NewBeaconBlock(block)
	require.NoError(t, err)
	s.processExitsFromBlock(wb)
	require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
	require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
}

func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {

@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
		Signature: make([]byte, 96),
	}
	s.processExit(exit)
	require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
	require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
}

func TestProcessExitP2PUntrackedIndices(t *testing.T) {

@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
		aggPerf.totalSyncCommitteeAggregations++
		s.aggregatedPerformance[idx] = aggPerf

		log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
		log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
	}
}

@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
				fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))

			log.WithFields(logrus.Fields{
				"ValidatorIndex":       validatorIdx,
				"ExpectedContribCount": len(committeeIndices),
				"ContribCount":         contrib,
				"NewBalance":           balance,
				"BalanceChange":        balanceChg,
				"validatorIndex":       validatorIdx,
				"expectedContribCount": len(committeeIndices),
				"contribCount":         contrib,
				"newBalance":           balance,
				"balanceChange":        balanceChg,
			}).Info("Sync committee contribution included")
		}
	}

@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
	}

	s.processSyncCommitteeContribution(contrib)
	require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
	require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
	require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
	require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

func TestProcessSyncAggregate(t *testing.T) {

@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
	require.NoError(t, err)

	s.processSyncAggregate(beaconState, wrappedBlock)
	require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
	require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
	require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
	require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
	require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
	require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

@@ -111,7 +111,7 @@ func (s *Service) Start() {
	sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })

	log.WithFields(logrus.Fields{
		"ValidatorIndices": tracked,
		"validatorIndices": tracked,
	}).Info("Starting service")

	go s.run()

@@ -134,7 +134,7 @@ func (s *Service) run() {
	}

	epoch := slots.ToEpoch(st.Slot())
	log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
	log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")

	s.Lock()
	s.initializePerformanceStructures(st, epoch)

@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
	for idx := range s.TrackedValidators {
		balance, err := state.BalanceAtIndex(idx)
		if err != nil {
			log.WithError(err).WithField("ValidatorIndex", idx).Error(
			log.WithError(err).WithField("validatorIndex", idx).Error(
				"Could not fetch starting balance, skipping aggregated logs.")
			balance = 0
		}

@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
	for idx := range s.TrackedValidators {
		syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
		if err != nil {
			log.WithError(err).WithField("ValidatorIndex", idx).Error(
			log.WithError(err).WithField("validatorIndex", idx).Error(
				"Sync committee assignments will not be reported")
			delete(s.trackedSyncCommitteeIndices, idx)
		} else if len(syncIdx) == 0 {

@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
	// wait for Logrus
	time.Sleep(1000 * time.Millisecond)
	require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
	require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
	require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
	s.Lock()
	require.Equal(t, s.isLogging, true, "monitor is not running")
	s.Unlock()

@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {

	// Wait for Logrus
	time.Sleep(1000 * time.Millisecond)
	wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
	wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
	require.LogsContain(t, hook, wanted1)

}

@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
	if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
		c := params.BeaconConfig()
		c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
		log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
		log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
		params.OverrideBeaconConfig(c)
	}
	if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
		c := params.BeaconConfig()
		c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
		log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
		log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
		params.OverrideBeaconConfig(c)
	}
	if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
		c := params.BeaconConfig()
		c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
		log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
		log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
		params.OverrideBeaconConfig(c)
	}

@@ -118,6 +118,7 @@ type BeaconNode struct {
	BackfillOpts            []backfill.ServiceOption
	initialSyncComplete     chan struct{}
	BlobStorage             *filesystem.BlobStorage
	BlobStorageOptions      []filesystem.BlobStorageOption
	blobRetentionEpochs     primitives.Epoch
	verifyInitWaiter        *verification.InitializerWaiter
	syncChecker             *initialsync.SyncChecker

@@ -209,6 +210,16 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
		return nil, err
	}

	// Allow tests to set it as an opt.
	if beacon.BlobStorage == nil {
		beacon.BlobStorageOptions = append(beacon.BlobStorageOptions, filesystem.WithSaveFsync(features.Get().BlobSaveFsync))
		blobs, err := filesystem.NewBlobStorage(beacon.BlobStorageOptions...)
		if err != nil {
			return nil, err
		}
		beacon.BlobStorage = blobs
	}

	log.Debugln("Starting DB")
	if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
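
New constructs blob storage only when no option has already injected one, which is what lets tests swap in a fake. A hedged sketch of that construct-unless-injected pattern (type and option names here are illustrative, not Prysm's):

package main

import "fmt"

type storage struct{ dir string }

type node struct{ blobs *storage }

// newNode applies options first, then falls back to a production default,
// mirroring the shape of the New function above.
func newNode(opts ...func(*node)) *node {
	n := &node{}
	for _, opt := range opts {
		opt(n) // a test option may inject n.blobs here
	}
	if n.blobs == nil {
		n.blobs = &storage{dir: "/var/lib/blobs"} // production default
	}
	return n
}

func main() {
	prod := newNode()
	test := newNode(func(n *node) { n.blobs = &storage{dir: "/tmp/fake"} })
	fmt.Println(prod.blobs.dir, test.blobs.dir) // /var/lib/blobs /tmp/fake
}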

@@ -326,6 +337,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
	}
	beacon.collector = c

	// Do not store the finalized state as it has been provided to the respective services during
	// their initialization.
	beacon.finalizedStateAtStartUp = nil

	return beacon, nil
}
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {

@@ -422,7 +437,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
	clearDB := cliCtx.Bool(cmd.ClearDB.Name)
	forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

	log.WithField("database-path", dbPath).Info("Checking DB")
	log.WithField("databasePath", dbPath).Info("Checking DB")

	d, err := kv.NewKVStore(b.ctx, dbPath)
	if err != nil {

@@ -525,7 +540,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
	clearDB := cliCtx.Bool(cmd.ClearDB.Name)
	forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

	log.WithField("database-path", dbPath).Info("Checking DB")
	log.WithField("databasePath", dbPath).Info("Checking DB")

	d, err := slasherkv.NewKVStore(b.ctx, dbPath)
	if err != nil {

@@ -987,7 +1002,6 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
		apigateway.WithGatewayAddr(gatewayAddress),
		apigateway.WithRemoteAddr(selfAddress),
		apigateway.WithPbHandlers(muxs),
		apigateway.WithMuxHandler(gatewayConfig.Handler),
		apigateway.WithRemoteCert(selfCert),
		apigateway.WithMaxCallRecvMsgSize(maxCallSize),
		apigateway.WithAllowedOrigins(allowedOrigins),

@@ -43,6 +43,15 @@ func WithBlobStorage(bs *filesystem.BlobStorage) Option {
	}
}

// WithBlobStorageOptions appends 1 or more filesystem.BlobStorageOption on the beacon node,
// to be used when initializing blob storage.
func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
	return func(bn *BeaconNode) error {
		bn.BlobStorageOptions = append(bn.BlobStorageOptions, opt...)
		return nil
	}
}
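
A hypothetical call site for the option above, composed only from identifiers visible in this diff; the fsync value is illustrative and the real wiring may differ:

// Sketch only: WithSaveFsync is threaded through to filesystem.NewBlobStorage
// exactly as New does above; true is an assumed value, not the default.
func newBeaconNodeWithFsync(cliCtx *cli.Context, cancel context.CancelFunc) (*node.BeaconNode, error) {
	return node.New(cliCtx, cancel,
		node.WithBlobStorageOptions(filesystem.WithSaveFsync(true)),
	)
}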

// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
	return func(bn *BeaconNode) error {

@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
		switch slotInterval.Interval {
		case 0:
			duration := time.Since(t)
			log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
			log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
			batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
		case 1:
			batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "endpoints.go",
        "log.go",
        "service.go",
    ],

@@ -58,6 +59,9 @@ go_library(
        "@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
        "@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//plugin/ocgrpc:go_default_library",
        "@org_golang_google_grpc//:go_default_library",

@@ -70,23 +74,14 @@ go_library(
go_test(
    name = "go_default_test",
    size = "medium",
    srcs = ["service_test.go"],
    srcs = [
        "endpoints_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/execution/testing:go_default_library",
        "//beacon-chain/rpc/eth/beacon:go_default_library",
        "//beacon-chain/rpc/eth/blob:go_default_library",
        "//beacon-chain/rpc/eth/builder:go_default_library",
        "//beacon-chain/rpc/eth/debug:go_default_library",
        "//beacon-chain/rpc/eth/events:go_default_library",
        "//beacon-chain/rpc/eth/light-client:go_default_library",
        "//beacon-chain/rpc/eth/node:go_default_library",
        "//beacon-chain/rpc/eth/rewards:go_default_library",
        "//beacon-chain/rpc/eth/validator:go_default_library",
        "//beacon-chain/rpc/prysm/beacon:go_default_library",
        "//beacon-chain/rpc/prysm/node:go_default_library",
        "//beacon-chain/rpc/prysm/validator:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/sync/initial-sync/testing:go_default_library",
        "//testing/assert:go_default_library",

780  beacon-chain/rpc/endpoints.go  (new file)
@@ -0,0 +1,780 @@
package rpc

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/config"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
	beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
	nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
	validatorv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
	validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
)

type endpoint struct {
	template string
	name     string
	handler  http.HandlerFunc
	methods  []string
}

func (s *Service) endpoints(
	enableDebug bool,
	blocker lookup.Blocker,
	stater lookup.Stater,
	rewardFetcher rewards.BlockRewardsFetcher,
	validatorServer *validatorv1alpha1.Server,
	coreService *core.Service,
	ch *stategen.CanonicalHistory,
) []endpoint {
	endpoints := make([]endpoint, 0)
	endpoints = append(endpoints, s.rewardsEndpoints(blocker, stater, rewardFetcher)...)
	endpoints = append(endpoints, s.builderEndpoints(stater)...)
	endpoints = append(endpoints, s.blobEndpoints(blocker)...)
	endpoints = append(endpoints, s.validatorEndpoints(validatorServer, stater, coreService, rewardFetcher)...)
	endpoints = append(endpoints, s.nodeEndpoints()...)
	endpoints = append(endpoints, s.beaconEndpoints(ch, stater, blocker, validatorServer, coreService)...)
	endpoints = append(endpoints, s.configEndpoints()...)
	endpoints = append(endpoints, s.lightClientEndpoints(blocker, stater)...)
	endpoints = append(endpoints, s.eventsEndpoints()...)
	endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
	endpoints = append(endpoints, s.prysmNodeEndpoints()...)
	endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
	if enableDebug {
		endpoints = append(endpoints, s.debugEndpoints(stater)...)
	}
	return endpoints
}
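
endpoints() returns a declarative route table rather than registering handlers directly; the registration code is elsewhere in this commit. A hedged sketch of how such a table could be mounted on the gorilla/mux router that registerGRPCGateway already receives (register is illustrative, not the commit's actual wiring):

import "github.com/gorilla/mux"

// register walks the endpoint table and binds each handler to its path,
// HTTP methods, and route name on the given router.
func register(r *mux.Router, endpoints []endpoint) {
	for _, e := range endpoints {
		r.HandleFunc(e.template, e.handler).
			Methods(e.methods...).
			Name(e.name)
	}
}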

func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater, rewardFetcher rewards.BlockRewardsFetcher) []endpoint {
	server := &rewards.Server{
		Blocker:               blocker,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
		FinalizationFetcher:   s.cfg.FinalizationFetcher,
		TimeFetcher:           s.cfg.GenesisTimeFetcher,
		Stater:                stater,
		HeadFetcher:           s.cfg.HeadFetcher,
		BlockRewardFetcher:    rewardFetcher,
	}

	const namespace = "rewards"
	return []endpoint{
		{
			template: "/eth/v1/beacon/rewards/blocks/{block_id}",
			name:     namespace + ".BlockRewards",
			handler:  server.BlockRewards,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/rewards/attestations/{epoch}",
			name:     namespace + ".AttestationRewards",
			handler:  server.AttestationRewards,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/rewards/sync_committee/{block_id}",
			name:     namespace + ".SyncCommitteeRewards",
			handler:  server.SyncCommitteeRewards,
			methods:  []string{http.MethodPost},
		},
	}
}

func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
	server := &builder.Server{
		FinalizationFetcher:   s.cfg.FinalizationFetcher,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
		Stater:                stater,
	}

	const namespace = "builder"
	return []endpoint{
		{
			template: "/eth/v1/builder/states/{state_id}/expected_withdrawals",
			name:     namespace + ".ExpectedWithdrawals",
			handler:  server.ExpectedWithdrawals,
			methods:  []string{http.MethodGet},
		},
	}
}

func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
	server := &blob.Server{
		Blocker: blocker,
	}

	const namespace = "blob"
	return []endpoint{
		{
			template: "/eth/v1/beacon/blob_sidecars/{block_id}",
			name:     namespace + ".Blobs",
			handler:  server.Blobs,
			methods:  []string{http.MethodGet},
		},
	}
}

func (s *Service) validatorEndpoints(
	validatorServer *validatorv1alpha1.Server,
	stater lookup.Stater,
	coreService *core.Service,
	rewardFetcher rewards.BlockRewardsFetcher,
) []endpoint {
	server := &validator.Server{
		HeadFetcher:            s.cfg.HeadFetcher,
		TimeFetcher:            s.cfg.GenesisTimeFetcher,
		SyncChecker:            s.cfg.SyncService,
		OptimisticModeFetcher:  s.cfg.OptimisticModeFetcher,
		AttestationsPool:       s.cfg.AttestationsPool,
		PeerManager:            s.cfg.PeerManager,
		Broadcaster:            s.cfg.Broadcaster,
		V1Alpha1Server:         validatorServer,
		Stater:                 stater,
		SyncCommitteePool:      s.cfg.SyncCommitteeObjectPool,
		ChainInfoFetcher:       s.cfg.ChainInfoFetcher,
		BeaconDB:               s.cfg.BeaconDB,
		BlockBuilder:           s.cfg.BlockBuilder,
		OperationNotifier:      s.cfg.OperationNotifier,
		TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
		PayloadIDCache:         s.cfg.PayloadIDCache,
		CoreService:            coreService,
		BlockRewardFetcher:     rewardFetcher,
	}

	const namespace = "validator"
	return []endpoint{
		{
			template: "/eth/v1/validator/aggregate_attestation",
			name:     namespace + ".GetAggregateAttestation",
			handler:  server.GetAggregateAttestation,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/contribution_and_proofs",
			name:     namespace + ".SubmitContributionAndProofs",
			handler:  server.SubmitContributionAndProofs,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/aggregate_and_proofs",
			name:     namespace + ".SubmitAggregateAndProofs",
			handler:  server.SubmitAggregateAndProofs,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/sync_committee_contribution",
			name:     namespace + ".ProduceSyncCommitteeContribution",
			handler:  server.ProduceSyncCommitteeContribution,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/sync_committee_subscriptions",
			name:     namespace + ".SubmitSyncCommitteeSubscription",
			handler:  server.SubmitSyncCommitteeSubscription,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/beacon_committee_subscriptions",
			name:     namespace + ".SubmitBeaconCommitteeSubscription",
			handler:  server.SubmitBeaconCommitteeSubscription,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/attestation_data",
			name:     namespace + ".GetAttestationData",
			handler:  server.GetAttestationData,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/register_validator",
			name:     namespace + ".RegisterValidator",
			handler:  server.RegisterValidator,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/duties/attester/{epoch}",
			name:     namespace + ".GetAttesterDuties",
			handler:  server.GetAttesterDuties,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/duties/proposer/{epoch}",
			name:     namespace + ".GetProposerDuties",
			handler:  server.GetProposerDuties,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/duties/sync/{epoch}",
			name:     namespace + ".GetSyncCommitteeDuties",
			handler:  server.GetSyncCommitteeDuties,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/prepare_beacon_proposer",
			name:     namespace + ".PrepareBeaconProposer",
			handler:  server.PrepareBeaconProposer,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/liveness/{epoch}",
			name:     namespace + ".GetLiveness",
			handler:  server.GetLiveness,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v2/validator/blocks/{slot}",
			name:     namespace + ".ProduceBlockV2",
			handler:  server.ProduceBlockV2,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/blinded_blocks/{slot}",
			name:     namespace + ".ProduceBlindedBlock",
			handler:  server.ProduceBlindedBlock,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v3/validator/blocks/{slot}",
			name:     namespace + ".ProduceBlockV3",
			handler:  server.ProduceBlockV3,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/validator/beacon_committee_selections",
			name:     namespace + ".BeaconCommitteeSelections",
			handler:  server.BeaconCommitteeSelections,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/validator/sync_committee_selections",
			name:     namespace + ".SyncCommitteeSelections",
			handler:  server.SyncCommitteeSelections,
			methods:  []string{http.MethodPost},
		},
	}
}

func (s *Service) nodeEndpoints() []endpoint {
	server := &node.Server{
		BeaconDB:                  s.cfg.BeaconDB,
		Server:                    s.grpcServer,
		SyncChecker:               s.cfg.SyncService,
		OptimisticModeFetcher:     s.cfg.OptimisticModeFetcher,
		GenesisTimeFetcher:        s.cfg.GenesisTimeFetcher,
		PeersFetcher:              s.cfg.PeersFetcher,
		PeerManager:               s.cfg.PeerManager,
		MetadataProvider:          s.cfg.MetadataProvider,
		HeadFetcher:               s.cfg.HeadFetcher,
		ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
	}

	const namespace = "node"
	return []endpoint{
		{
			template: "/eth/v1/node/syncing",
			name:     namespace + ".GetSyncStatus",
			handler:  server.GetSyncStatus,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/identity",
			name:     namespace + ".GetIdentity",
			handler:  server.GetIdentity,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/peers/{peer_id}",
			name:     namespace + ".GetPeer",
			handler:  server.GetPeer,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/peers",
			name:     namespace + ".GetPeers",
			handler:  server.GetPeers,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/peer_count",
			name:     namespace + ".GetPeerCount",
			handler:  server.GetPeerCount,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/version",
			name:     namespace + ".GetVersion",
			handler:  server.GetVersion,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/node/health",
			name:     namespace + ".GetHealth",
			handler:  server.GetHealth,
			methods:  []string{http.MethodGet},
		},
	}
}

func (s *Service) beaconEndpoints(
	ch *stategen.CanonicalHistory,
	stater lookup.Stater,
	blocker lookup.Blocker,
	validatorServer *validatorv1alpha1.Server,
	coreService *core.Service,
) []endpoint {
	server := &beacon.Server{
		CanonicalHistory:              ch,
		BeaconDB:                      s.cfg.BeaconDB,
		AttestationsPool:              s.cfg.AttestationsPool,
		SlashingsPool:                 s.cfg.SlashingsPool,
		ChainInfoFetcher:              s.cfg.ChainInfoFetcher,
		GenesisTimeFetcher:            s.cfg.GenesisTimeFetcher,
		BlockNotifier:                 s.cfg.BlockNotifier,
		OperationNotifier:             s.cfg.OperationNotifier,
		Broadcaster:                   s.cfg.Broadcaster,
		BlockReceiver:                 s.cfg.BlockReceiver,
		StateGenService:               s.cfg.StateGen,
		Stater:                        stater,
		Blocker:                       blocker,
		OptimisticModeFetcher:         s.cfg.OptimisticModeFetcher,
		HeadFetcher:                   s.cfg.HeadFetcher,
		TimeFetcher:                   s.cfg.GenesisTimeFetcher,
		VoluntaryExitsPool:            s.cfg.ExitPool,
		V1Alpha1ValidatorServer:       validatorServer,
		SyncChecker:                   s.cfg.SyncService,
		ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
		BLSChangesPool:                s.cfg.BLSChangesPool,
		FinalizationFetcher:           s.cfg.FinalizationFetcher,
		ForkchoiceFetcher:             s.cfg.ForkchoiceFetcher,
		CoreService:                   coreService,
	}

	const namespace = "beacon"
	return []endpoint{
		{
			template: "/eth/v1/beacon/states/{state_id}/committees",
			name:     namespace + ".GetCommittees",
			handler:  server.GetCommittees,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/states/{state_id}/fork",
			name:     namespace + ".GetStateFork",
			handler:  server.GetStateFork,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/states/{state_id}/root",
			name:     namespace + ".GetStateRoot",
			handler:  server.GetStateRoot,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/states/{state_id}/sync_committees",
			name:     namespace + ".GetSyncCommittees",
			handler:  server.GetSyncCommittees,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/states/{state_id}/randao",
			name:     namespace + ".GetRandao",
			handler:  server.GetRandao,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/blocks",
			name:     namespace + ".PublishBlock",
			handler:  server.PublishBlock,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/blinded_blocks",
			name:     namespace + ".PublishBlindedBlock",
			handler:  server.PublishBlindedBlock,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v2/beacon/blocks",
			name:     namespace + ".PublishBlockV2",
			handler:  server.PublishBlockV2,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v2/beacon/blinded_blocks",
			name:     namespace + ".PublishBlindedBlockV2",
			handler:  server.PublishBlindedBlockV2,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v2/beacon/blocks/{block_id}",
			name:     namespace + ".GetBlockV2",
			handler:  server.GetBlockV2,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/blocks/{block_id}/attestations",
			name:     namespace + ".GetBlockAttestations",
			handler:  server.GetBlockAttestations,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/blinded_blocks/{block_id}",
			name:     namespace + ".GetBlindedBlock",
			handler:  server.GetBlindedBlock,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/blocks/{block_id}/root",
			name:     namespace + ".GetBlockRoot",
			handler:  server.GetBlockRoot,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/attestations",
			name:     namespace + ".ListAttestations",
			handler:  server.ListAttestations,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/attestations",
			name:     namespace + ".SubmitAttestations",
			handler:  server.SubmitAttestations,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/pool/voluntary_exits",
			name:     namespace + ".ListVoluntaryExits",
			handler:  server.ListVoluntaryExits,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/voluntary_exits",
			name:     namespace + ".SubmitVoluntaryExit",
			handler:  server.SubmitVoluntaryExit,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/pool/sync_committees",
			name:     namespace + ".SubmitSyncCommitteeSignatures",
			handler:  server.SubmitSyncCommitteeSignatures,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/pool/bls_to_execution_changes",
			name:     namespace + ".ListBLSToExecutionChanges",
			handler:  server.ListBLSToExecutionChanges,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/bls_to_execution_changes",
			name:     namespace + ".SubmitBLSToExecutionChanges",
			handler:  server.SubmitBLSToExecutionChanges,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/pool/attester_slashings",
			name:     namespace + ".GetAttesterSlashings",
			handler:  server.GetAttesterSlashings,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/attester_slashings",
			name:     namespace + ".SubmitAttesterSlashing",
			handler:  server.SubmitAttesterSlashing,
			methods:  []string{http.MethodPost},
		},
		{
			template: "/eth/v1/beacon/pool/proposer_slashings",
			name:     namespace + ".GetProposerSlashings",
			handler:  server.GetProposerSlashings,
			methods:  []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/pool/proposer_slashings",
|
||||
name: namespace + ".SubmitProposerSlashing",
|
||||
handler: server.SubmitProposerSlashing,
|
||||
methods: []string{http.MethodPost},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/headers",
|
||||
name: namespace + ".GetBlockHeaders",
|
||||
handler: server.GetBlockHeaders,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/headers/{block_id}",
|
||||
name: namespace + ".GetBlockHeader",
|
||||
handler: server.GetBlockHeader,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/genesis",
|
||||
name: namespace + ".GetGenesis",
|
||||
handler: server.GetGenesis,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/states/{state_id}/finality_checkpoints",
|
||||
name: namespace + ".GetFinalityCheckpoints",
|
||||
handler: server.GetFinalityCheckpoints,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/states/{state_id}/validators",
|
||||
name: namespace + ".GetValidators",
|
||||
handler: server.GetValidators,
|
||||
methods: []string{http.MethodGet, http.MethodPost},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/states/{state_id}/validators/{validator_id}",
|
||||
name: namespace + ".GetValidator",
|
||||
handler: server.GetValidator,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/states/{state_id}/validator_balances",
|
||||
name: namespace + ".GetValidatorBalances",
|
||||
handler: server.GetValidatorBalances,
|
||||
methods: []string{http.MethodGet, http.MethodPost},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) configEndpoints() []endpoint {
|
||||
const namespace = "config"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/eth/v1/config/deposit_contract",
|
||||
name: namespace + ".GetDepositContract",
|
||||
handler: config.GetDepositContract,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/config/fork_schedule",
|
||||
name: namespace + ".GetForkSchedule",
|
||||
handler: config.GetForkSchedule,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/config/spec",
|
||||
name: namespace + ".GetSpec",
|
||||
handler: config.GetSpec,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Stater) []endpoint {
|
||||
server := &lightclient.Server{
|
||||
Blocker: blocker,
|
||||
Stater: stater,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
}
|
||||
|
||||
const namespace = "lightclient"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/eth/v1/beacon/light_client/bootstrap/{block_root}",
|
||||
name: namespace + ".GetLightClientBootstrap",
|
||||
handler: server.GetLightClientBootstrap,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/light_client/updates",
|
||||
name: namespace + ".GetLightClientUpdatesByRange",
|
||||
handler: server.GetLightClientUpdatesByRange,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/light_client/finality_update",
|
||||
name: namespace + ".GetLightClientFinalityUpdate",
|
||||
handler: server.GetLightClientFinalityUpdate,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/light_client/optimistic_update",
|
||||
name: namespace + ".GetLightClientOptimisticUpdate",
|
||||
handler: server.GetLightClientOptimisticUpdate,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
|
||||
server := &debug.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
Stater: stater,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
ForkFetcher: s.cfg.ForkFetcher,
|
||||
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
|
||||
FinalizationFetcher: s.cfg.FinalizationFetcher,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
}
|
||||
|
||||
const namespace = "debug"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/eth/v2/debug/beacon/states/{state_id}",
|
||||
name: namespace + ".GetBeaconStateV2",
|
||||
handler: server.GetBeaconStateV2,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v2/debug/beacon/heads",
|
||||
name: namespace + ".GetForkChoiceHeadsV2",
|
||||
handler: server.GetForkChoiceHeadsV2,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/debug/fork_choice",
|
||||
name: namespace + ".GetForkChoice",
|
||||
handler: server.GetForkChoice,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) eventsEndpoints() []endpoint {
|
||||
server := &events.Server{
|
||||
StateNotifier: s.cfg.StateNotifier,
|
||||
OperationNotifier: s.cfg.OperationNotifier,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
}
|
||||
|
||||
const namespace = "events"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/eth/v1/events",
|
||||
name: namespace + ".StreamEvents",
|
||||
handler: server.StreamEvents,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Prysm custom endpoints
|
||||
|
||||
func (s *Service) prysmBeaconEndpoints(ch *stategen.CanonicalHistory, stater lookup.Stater) []endpoint {
|
||||
server := &beaconprysm.Server{
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
TimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
CanonicalHistory: ch,
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Stater: stater,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
FinalizationFetcher: s.cfg.FinalizationFetcher,
|
||||
}
|
||||
|
||||
const namespace = "prysm.beacon"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/prysm/v1/beacon/weak_subjectivity",
|
||||
name: namespace + ".GetWeakSubjectivity",
|
||||
handler: server.GetWeakSubjectivity,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/states/{state_id}/validator_count",
|
||||
name: namespace + ".GetValidatorCount",
|
||||
handler: server.GetValidatorCount,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/prysm/v1/beacon/states/{state_id}/validator_count",
|
||||
name: namespace + ".GetValidatorCount",
|
||||
handler: server.GetValidatorCount,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
server := &nodeprysm.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
PeersFetcher: s.cfg.PeersFetcher,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
MetadataProvider: s.cfg.MetadataProvider,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
|
||||
}
|
||||
|
||||
const namespace = "prysm.node"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/prysm/node/trusted_peers",
|
||||
name: namespace + ".ListTrustedPeer",
|
||||
handler: server.ListTrustedPeer,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/prysm/v1/node/trusted_peers",
|
||||
name: namespace + ".ListTrustedPeer",
|
||||
handler: server.ListTrustedPeer,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/prysm/node/trusted_peers",
|
||||
name: namespace + ".AddTrustedPeer",
|
||||
handler: server.AddTrustedPeer,
|
||||
methods: []string{http.MethodPost},
|
||||
},
|
||||
{
|
||||
template: "/prysm/v1/node/trusted_peers",
|
||||
name: namespace + ".AddTrustedPeer",
|
||||
handler: server.AddTrustedPeer,
|
||||
methods: []string{http.MethodPost},
|
||||
},
|
||||
{
|
||||
template: "/prysm/node/trusted_peers/{peer_id}",
|
||||
name: namespace + ".RemoveTrustedPeer",
|
||||
handler: server.RemoveTrustedPeer,
|
||||
methods: []string{http.MethodDelete},
|
||||
},
|
||||
{
|
||||
template: "/prysm/v1/node/trusted_peers/{peer_id}",
|
||||
name: namespace + ".RemoveTrustedPeer",
|
||||
handler: server.RemoveTrustedPeer,
|
||||
methods: []string{http.MethodDelete},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
|
||||
server := &validatorprysm.Server{
|
||||
CoreService: coreService,
|
||||
}
|
||||
|
||||
const namespace = "prysm.validator"
|
||||
return []endpoint{
|
||||
{
|
||||
template: "/prysm/validators/performance",
|
||||
name: namespace + ".GetValidatorPerformance",
|
||||
handler: server.GetValidatorPerformance,
|
||||
methods: []string{http.MethodPost},
|
||||
},
|
||||
{
|
||||
template: "/prysm/v1/validators/performance",
|
||||
name: namespace + ".GetValidatorPerformance",
|
||||
handler: server.GetValidatorPerformance,
|
||||
methods: []string{http.MethodPost},
|
||||
},
|
||||
}
|
||||
}
|
||||
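The table-driven registration above assumes a small route descriptor type. A plausible shape, inferred from the fields used throughout this diff (a sketch, not necessarily Prysm's exact definition):

type endpoint struct {
	template string           // gorilla/mux route template, e.g. "/eth/v1/node/health"
	name     string           // metric label, e.g. "beacon.GetGenesis"
	handler  http.HandlerFunc // the function that serves the route
	methods  []string         // allowed HTTP methods for the route
}

Each constructor returns a slice of these descriptors, and a single loop (shown later in this diff) registers them on the router with Prometheus instrumentation, replacing the per-route initialize*Routes functions that are deleted below.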
beacon-chain/rpc/endpoints_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
package rpc

import (
	"net/http"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/testing/assert"
)

func Test_endpoints(t *testing.T) {
	rewardsRoutes := map[string][]string{
		"/eth/v1/beacon/rewards/blocks/{block_id}":         {http.MethodGet},
		"/eth/v1/beacon/rewards/attestations/{epoch}":      {http.MethodPost},
		"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
	}

	beaconRoutes := map[string][]string{
		"/eth/v1/beacon/genesis":                                      {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/root":                       {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/fork":                       {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/finality_checkpoints":       {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/validators":                 {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/states/{state_id}/validators/{validator_id}":  {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/validator_balances":         {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/states/{state_id}/committees":                 {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/sync_committees":            {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/randao":                     {http.MethodGet},
		"/eth/v1/beacon/headers":                                      {http.MethodGet},
		"/eth/v1/beacon/headers/{block_id}":                           {http.MethodGet},
		"/eth/v1/beacon/blinded_blocks":                               {http.MethodPost},
		"/eth/v2/beacon/blinded_blocks":                               {http.MethodPost},
		"/eth/v1/beacon/blocks":                                       {http.MethodPost},
		"/eth/v2/beacon/blocks":                                       {http.MethodPost},
		"/eth/v1/beacon/blocks/{block_id}":                            {http.MethodGet},
		"/eth/v2/beacon/blocks/{block_id}":                            {http.MethodGet},
		"/eth/v1/beacon/blocks/{block_id}/root":                       {http.MethodGet},
		"/eth/v1/beacon/blocks/{block_id}/attestations":               {http.MethodGet},
		"/eth/v1/beacon/blob_sidecars/{block_id}":                     {http.MethodGet},
		"/eth/v1/beacon/deposit_snapshot":                             {http.MethodGet},
		"/eth/v1/beacon/blinded_blocks/{block_id}":                    {http.MethodGet},
		"/eth/v1/beacon/pool/attestations":                            {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/pool/attester_slashings":                      {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/pool/proposer_slashings":                      {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/pool/sync_committees":                         {http.MethodPost},
		"/eth/v1/beacon/pool/voluntary_exits":                         {http.MethodGet, http.MethodPost},
		"/eth/v1/beacon/pool/bls_to_execution_changes":                {http.MethodGet, http.MethodPost},
	}

	lightClientRoutes := map[string][]string{
		"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
		"/eth/v1/beacon/light_client/updates":                {http.MethodGet},
		"/eth/v1/beacon/light_client/finality_update":        {http.MethodGet},
		"/eth/v1/beacon/light_client/optimistic_update":      {http.MethodGet},
	}

	builderRoutes := map[string][]string{
		"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
	}

	blobRoutes := map[string][]string{
		"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
	}

	configRoutes := map[string][]string{
		"/eth/v1/config/fork_schedule":    {http.MethodGet},
		"/eth/v1/config/spec":             {http.MethodGet},
		"/eth/v1/config/deposit_contract": {http.MethodGet},
	}

	debugRoutes := map[string][]string{
		"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet},
		"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
		"/eth/v2/debug/beacon/heads":             {http.MethodGet},
		"/eth/v1/debug/fork_choice":              {http.MethodGet},
	}

	eventsRoutes := map[string][]string{
		"/eth/v1/events": {http.MethodGet},
	}

	nodeRoutes := map[string][]string{
		"/eth/v1/node/identity":        {http.MethodGet},
		"/eth/v1/node/peers":           {http.MethodGet},
		"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
		"/eth/v1/node/peer_count":      {http.MethodGet},
		"/eth/v1/node/version":         {http.MethodGet},
		"/eth/v1/node/syncing":         {http.MethodGet},
		"/eth/v1/node/health":          {http.MethodGet},
	}

	validatorRoutes := map[string][]string{
		"/eth/v1/validator/duties/attester/{epoch}":        {http.MethodPost},
		"/eth/v1/validator/duties/proposer/{epoch}":        {http.MethodGet},
		"/eth/v1/validator/duties/sync/{epoch}":            {http.MethodPost},
		"/eth/v2/validator/blocks/{slot}":                  {http.MethodGet},
		"/eth/v3/validator/blocks/{slot}":                  {http.MethodGet},
		"/eth/v1/validator/blinded_blocks/{slot}":          {http.MethodGet},
		"/eth/v1/validator/attestation_data":               {http.MethodGet},
		"/eth/v1/validator/aggregate_attestation":          {http.MethodGet},
		"/eth/v1/validator/aggregate_and_proofs":           {http.MethodPost},
		"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
		"/eth/v1/validator/sync_committee_subscriptions":   {http.MethodPost},
		"/eth/v1/validator/beacon_committee_selections":    {http.MethodPost},
		"/eth/v1/validator/sync_committee_selections":      {http.MethodPost},
		"/eth/v1/validator/sync_committee_contribution":    {http.MethodGet},
		"/eth/v1/validator/contribution_and_proofs":        {http.MethodPost},
		"/eth/v1/validator/prepare_beacon_proposer":        {http.MethodPost},
		"/eth/v1/validator/register_validator":             {http.MethodPost},
		"/eth/v1/validator/liveness/{epoch}":               {http.MethodPost},
	}

	prysmBeaconRoutes := map[string][]string{
		"/prysm/v1/beacon/weak_subjectivity":                 {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/validator_count":   {http.MethodGet},
		"/prysm/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
	}

	prysmNodeRoutes := map[string][]string{
		"/prysm/node/trusted_peers":              {http.MethodGet, http.MethodPost},
		"/prysm/v1/node/trusted_peers":           {http.MethodGet, http.MethodPost},
		"/prysm/node/trusted_peers/{peer_id}":    {http.MethodDelete},
		"/prysm/v1/node/trusted_peers/{peer_id}": {http.MethodDelete},
	}

	prysmValidatorRoutes := map[string][]string{
		"/prysm/validators/performance":    {http.MethodPost},
		"/prysm/v1/validators/performance": {http.MethodPost},
	}

	s := &Service{cfg: &Config{}}

	routesMap := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, rewardsRoutes, lightClientRoutes, blobRoutes, prysmValidatorRoutes, prysmNodeRoutes, prysmBeaconRoutes)
	actual := s.endpoints(true, nil, nil, nil, nil, nil, nil)
	for _, e := range actual {
		methods, ok := routesMap[e.template]
		assert.Equal(t, true, ok, "endpoint "+e.template+" not found")
		if ok {
			for _, em := range e.methods {
				methodFound := false
				for _, m := range methods {
					if m == em {
						methodFound = true
						break
					}
				}
				assert.Equal(t, true, methodFound, "method "+em+" for endpoint "+e.template+" not found")
			}
		}
	}
}
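The test relies on a combineMaps helper that is not shown in this diff. A hypothetical implementation consistent with its variadic usage above (name and signature inferred, so treat it as illustrative rather than Prysm's actual code):

// combineMaps merges any number of route maps into one. On a key collision,
// later maps overwrite earlier entries; the only shared key in the test above
// is the blob sidecars path, which carries identical methods in both maps.
func combineMaps(maps ...map[string][]string) map[string][]string {
	combined := make(map[string][]string)
	for _, m := range maps {
		for template, methods := range m {
			combined[template] = methods
		}
	}
	return combined
}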
@@ -118,10 +118,10 @@ go_test(
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_mock//gomock:go_default_library",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_stretchr_testify//mock:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
)
@@ -13,7 +13,8 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/golang/mock/gomock"
	"go.uber.org/mock/gomock"

	"github.com/gorilla/mux"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-bitfield"
@@ -80,7 +80,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
		httputil.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
		return
	}
	root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
	root, err := helpers.BlockRootAtSlot(st, slots.PrevSlot(st.Slot()))
	if err != nil {
		httputil.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
		return
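A recurring change across these hunks swaps raw `slot - 1` arithmetic for `slots.PrevSlot`, which saturates at zero instead of underflowing the unsigned slot type at genesis. A minimal standalone sketch of the idea (the real helper lives in Prysm's time/slots package; this version is illustrative):

package main

import "fmt"

// Slot mirrors Prysm's primitives.Slot, an unsigned integer type.
type Slot uint64

// prevSlot returns slot-1, saturating at 0 so that genesis (slot 0)
// never wraps around to math.MaxUint64. This is the behavior the diff
// relies on when it replaces expressions like `st.Slot()-1`.
func prevSlot(s Slot) Slot {
	if s > 0 {
		return s - 1
	}
	return 0
}

func main() {
	fmt.Println(prevSlot(10)) // 9
	fmt.Println(prevSlot(0))  // 0, not 18446744073709551615
}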
@@ -14,6 +14,7 @@ import (
	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/network/httputil"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// BlockRewardsFetcher is an interface that provides access to reward-related responses.
@@ -123,7 +124,7 @@ func (rs *BlockRewardService) GetStateForRewards(ctx context.Context, blk interf
	// We want to run several block processing functions that update the proposer's balance.
	// This will allow us to calculate proposer rewards for each operation (atts, slashings, etc.).
	// To do this, we replay the state up to the block's slot, but stop before processing the block itself.
	st, err := rs.Replayer.ReplayerForSlot(blk.Slot()-1).ReplayToSlot(ctx, blk.Slot())
	st, err := rs.Replayer.ReplayerForSlot(slots.PrevSlot(blk.Slot())).ReplayToSlot(ctx, blk.Slot())
	if err != nil {
		return nil, &httputil.DefaultJsonError{
			Message: "Could not get state: " + err.Error(),
@@ -90,9 +90,9 @@ go_test(
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_mock//gomock:go_default_library",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
)
@@ -1092,6 +1092,12 @@ func (s *Server) BeaconCommitteeSelections(w http.ResponseWriter, _ *http.Reques
	httputil.HandleError(w, "Endpoint not implemented", 501)
}

// SyncCommitteeSelections responds with the appropriate message and status code according to the spec:
// https://ethereum.github.io/beacon-APIs/#/Validator/submitSyncCommitteeSelections.
func (s *Server) SyncCommitteeSelections(w http.ResponseWriter, _ *http.Request) {
	httputil.HandleError(w, "Endpoint not implemented", 501)
}

// attestationDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1),
// or the genesis block root in the case of underflow.
func attestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
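The attestationDependentRoot comment above compresses two underflow-prone subtractions into one line. A hypothetical sketch of the slot arithmetic it describes (helper name and signature are illustrative, not Prysm's actual code):

package main

import "fmt"

const slotsPerEpoch = 32

// dependentRootSlot returns the slot whose block root pins attester duties
// for `epoch`: compute_start_slot_at_epoch(epoch-1) - 1, i.e. the last slot
// of epoch-2. Either subtraction can underflow near genesis, in which case
// the genesis block root should be used instead (signalled by useGenesis).
func dependentRootSlot(epoch uint64) (slot uint64, useGenesis bool) {
	if epoch == 0 {
		return 0, true // epoch-1 underflows
	}
	startSlot := (epoch - 1) * slotsPerEpoch
	if startSlot == 0 {
		return 0, true // start slot of epoch 0: slot-1 underflows
	}
	return startSlot - 1, false
}

func main() {
	fmt.Println(dependentRootSlot(0)) // 0 true
	fmt.Println(dependentRootSlot(1)) // 0 true
	fmt.Println(dependentRootSlot(2)) // 31 false: last slot of epoch 0
}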
@@ -10,7 +10,6 @@ import (
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/golang/mock/gomock"
	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
@@ -22,6 +21,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	mock2 "github.com/prysmaticlabs/prysm/v5/testing/mock"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"go.uber.org/mock/gomock"
)

func TestProduceBlockV2(t *testing.T) {
@@ -238,7 +238,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
	m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
	if err != nil {
		log.WithFields(log.Fields{
			"block root": hexutil.Encode(root),
			"blockRoot": hexutil.Encode(root),
		}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
		return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
	}
@@ -254,8 +254,8 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
	vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
	if err != nil {
		log.WithFields(log.Fields{
			"block root": hexutil.Encode(root),
			"blob index": index,
			"blockRoot": hexutil.Encode(root),
			"blobIndex": index,
		}).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
		return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
	}
@@ -162,7 +162,7 @@ common_deps = [
    "@com_github_d4l3k_messagediff//:go_default_library",
    "@com_github_ethereum_go_ethereum//common:go_default_library",
    "@com_github_ethereum_go_ethereum//core/types:go_default_library",
    "@com_github_golang_mock//gomock:go_default_library",
    "@org_uber_go_mock//gomock:go_default_library",
    "@com_github_pkg_errors//:go_default_library",
    "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
    "@com_github_sirupsen_logrus//:go_default_library",
@@ -4,7 +4,6 @@ import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -18,6 +17,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/mock"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"go.uber.org/mock/gomock"
)

func TestServer_StreamAltairBlocksVerified_ContextCanceled(t *testing.T) {
@@ -115,13 +115,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
	return vs.constructGenericBeaconBlock(sBlk, bundleCache.get(req.Slot))
}

func (vs *Server) handleFailedReorgAttempt(ctx context.Context, slot primitives.Slot, parentRoot, headRoot [32]byte) (state.BeaconState, error) {
	blockchain.LateBlockAttemptedReorgCount.Inc()
	log.WithFields(logrus.Fields{
		"slot":       slot,
		"parentRoot": fmt.Sprintf("%#x", parentRoot),
		"headRoot":   fmt.Sprintf("%#x", headRoot),
	}).Warn("late block attempted reorg failed")
func (vs *Server) handleSuccesfulReorgAttempt(ctx context.Context, slot primitives.Slot, parentRoot, headRoot [32]byte) (state.BeaconState, error) {
	// Try to get the state from the NSC
	head := transition.NextSlotState(parentRoot[:], slot)
	if head != nil {
@@ -135,7 +129,16 @@ func (vs *Server) handleFailedReorgAttempt(ctx context.Context, slot primitives.
	return head, nil
}

func (vs *Server) getHeadNoFailedReorg(ctx context.Context, slot primitives.Slot, parentRoot [32]byte) (state.BeaconState, error) {
func logFailedReorgAttempt(slot primitives.Slot, oldHeadRoot, headRoot [32]byte) {
	blockchain.LateBlockAttemptedReorgCount.Inc()
	log.WithFields(logrus.Fields{
		"slot":        slot,
		"oldHeadRoot": fmt.Sprintf("%#x", oldHeadRoot),
		"headRoot":    fmt.Sprintf("%#x", headRoot),
	}).Warn("late block attempted reorg failed")
}

func (vs *Server) getHeadNoReorg(ctx context.Context, slot primitives.Slot, parentRoot [32]byte) (state.BeaconState, error) {
	// Try to get the state from the NSC
	head := transition.NextSlotState(parentRoot[:], slot)
	if head != nil {
@@ -148,11 +151,14 @@ func (vs *Server) getHeadNoFailedReorg(ctx context.Context, slot primitives.Slot
	return head, nil
}

func (vs *Server) getParentStateFromReorgData(ctx context.Context, slot primitives.Slot, parentRoot, headRoot [32]byte) (head state.BeaconState, err error) {
func (vs *Server) getParentStateFromReorgData(ctx context.Context, slot primitives.Slot, oldHeadRoot, parentRoot, headRoot [32]byte) (head state.BeaconState, err error) {
	if parentRoot != headRoot {
		head, err = vs.handleFailedReorgAttempt(ctx, slot, parentRoot, headRoot)
		head, err = vs.handleSuccesfulReorgAttempt(ctx, slot, parentRoot, headRoot)
	} else {
		head, err = vs.getHeadNoFailedReorg(ctx, slot, parentRoot)
		if oldHeadRoot != headRoot {
			logFailedReorgAttempt(slot, oldHeadRoot, headRoot)
		}
		head, err = vs.getHeadNoReorg(ctx, slot, parentRoot)
	}
	if err != nil {
		return nil, err
@@ -169,10 +175,11 @@ func (vs *Server) getParentStateFromReorgData(ctx context.Context, slot primitiv

func (vs *Server) getParentState(ctx context.Context, slot primitives.Slot) (state.BeaconState, [32]byte, error) {
	// process attestations and update head in forkchoice
	oldHeadRoot := vs.ForkchoiceFetcher.CachedHeadRoot()
	vs.ForkchoiceFetcher.UpdateHead(ctx, vs.TimeFetcher.CurrentSlot())
	headRoot := vs.ForkchoiceFetcher.CachedHeadRoot()
	parentRoot := vs.ForkchoiceFetcher.GetProposerHead()
	head, err := vs.getParentStateFromReorgData(ctx, slot, parentRoot, headRoot)
	head, err := vs.getParentStateFromReorgData(ctx, slot, oldHeadRoot, parentRoot, headRoot)
	return head, parentRoot, err
}
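The refactor in this hunk decouples the failed-reorg warning from the state lookup: `handleFailedReorgAttempt` becomes `handleSuccesfulReorgAttempt` for the branch where the proposer head differs from the canonical head, while the new `logFailedReorgAttempt` fires only when the proposer sticks with the canonical head (`parentRoot == headRoot`) even though `UpdateHead` moved the head out from under it (`oldHeadRoot != headRoot`). A condensed sketch of the new branch logic (names and return values are illustrative, not the actual function):

// reorgOutcome mirrors the branch structure of the refactored
// getParentStateFromReorgData: which helper runs, and whether the
// failed-reorg counter and warning fire.
func reorgOutcome(oldHeadRoot, parentRoot, headRoot [32]byte) (helper string, logFailed bool) {
	if parentRoot != headRoot {
		// Proposing on a parent other than the canonical head:
		// the attempted reorg is going ahead, so nothing is logged.
		return "handleSuccesfulReorgAttempt", false
	}
	// Proposing on the canonical head. If the head moved while updating
	// forkchoice, a late-block reorg was attempted and lost.
	return "getHeadNoReorg", oldHeadRoot != headRoot
}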
@@ -12,6 +12,7 @@ import (
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	synccontribution "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"go.opencensus.io/trace"
)

@@ -20,7 +21,7 @@ func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBea
		return
	}

	syncAggregate, err := vs.getSyncAggregate(ctx, blk.Block().Slot()-1, blk.Block().ParentRoot())
	syncAggregate, err := vs.getSyncAggregate(ctx, slots.PrevSlot(blk.Block().Slot()), blk.Block().ParentRoot())
	if err != nil {
		log.WithError(err).Error("Could not get sync aggregate")
		emptySig := [96]byte{0xC0}
@@ -146,8 +146,8 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1

	if shouldRebuildTrie(canonicalEth1Data.DepositCount, uint64(len(upToEth1DataDeposits))) {
		log.WithFields(logrus.Fields{
			"unfinalized deposits": len(upToEth1DataDeposits),
			"total deposit count":  canonicalEth1Data.DepositCount,
			"unfinalizedDeposits": len(upToEth1DataDeposits),
			"totalDepositCount":   canonicalEth1Data.DepositCount,
		}).Warn("Too many unfinalized deposits, building a deposit trie from scratch.")
		return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
	}
@@ -838,7 +838,7 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
	require.NoError(t, err)
	proposerIdx, err := helpers.BeaconProposerIndex(ctx, beaconState)
	require.NoError(t, err)
	require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
	require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
	req.Block.Body.RandaoReveal = randaoReveal
	currentEpoch := coretime.CurrentEpoch(beaconState)
	req.Signature, err = signing.ComputeDomainAndSign(beaconState, currentEpoch, req.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
@@ -2874,8 +2874,8 @@ func TestProposer_GetParentHeadState(t *testing.T) {
		Eth1BlockFetcher: &mockExecution.Chain{},
		StateGen:         stategen.New(db, doublylinkedtree.New()),
	}
	t.Run("failed reorg", func(tt *testing.T) {
		head, err := proposerServer.getParentStateFromReorgData(ctx, 1, parentRoot, headRoot)
	t.Run("successful reorg", func(tt *testing.T) {
		head, err := proposerServer.getParentStateFromReorgData(ctx, 1, parentRoot, parentRoot, headRoot)
		require.NoError(t, err)
		st := parentState.Copy()
		st, err = transition.ProcessSlots(ctx, st, st.Slot()+1)
@@ -2892,7 +2892,7 @@ func TestProposer_GetParentHeadState(t *testing.T) {

	t.Run("no reorg", func(tt *testing.T) {
		require.NoError(t, transition.UpdateNextSlotCache(ctx, headRoot[:], headState))
		head, err := proposerServer.getParentStateFromReorgData(ctx, 1, headRoot, headRoot)
		head, err := proposerServer.getParentStateFromReorgData(ctx, 1, headRoot, headRoot, headRoot)
		require.NoError(t, err)
		st := headState.Copy()
		st, err = transition.ProcessSlots(ctx, st, st.Slot()+1)
@@ -2906,4 +2906,23 @@ func TestProposer_GetParentHeadState(t *testing.T) {
		require.Equal(t, [32]byte(str), [32]byte(headStr))
		require.NotEqual(t, [32]byte(str), [32]byte(genesisStr))
	})

	t.Run("failed reorg", func(tt *testing.T) {
		hook := logTest.NewGlobal()
		require.NoError(t, transition.UpdateNextSlotCache(ctx, headRoot[:], headState))
		head, err := proposerServer.getParentStateFromReorgData(ctx, 1, parentRoot, headRoot, headRoot)
		require.NoError(t, err)
		st := headState.Copy()
		st, err = transition.ProcessSlots(ctx, st, st.Slot()+1)
		require.NoError(t, err)
		str, err := st.StateRootAtIndex(0)
		require.NoError(t, err)
		headStr, err := head.StateRootAtIndex(0)
		require.NoError(t, err)
		genesisStr, err := parentState.StateRootAtIndex(0)
		require.NoError(t, err)
		require.Equal(t, [32]byte(str), [32]byte(headStr))
		require.NotEqual(t, [32]byte(str), [32]byte(genesisStr))
		require.LogsContain(t, hook, "late block attempted reorg failed")
	})
}
@@ -193,7 +193,7 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
	if err != nil {
		return status.Error(codes.Canceled, "Context canceled")
	}
	log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event")
	log.WithField("startTime", clock.GenesisTime()).Debug("Received chain started event")
	log.Debug("Sending genesis time notification to connected validator clients")
	gvr := clock.GenesisValidatorsRoot()
	res := &ethpb.ChainStartResponse{
@@ -4,7 +4,6 @@ import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
@@ -19,6 +18,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/mock"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"go.uber.org/mock/gomock"
)

func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
@@ -6,7 +6,6 @@ import (
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/prysmaticlabs/prysm/v5/async/event"
	mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
@@ -24,6 +23,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"go.uber.org/mock/gomock"
	"google.golang.org/protobuf/types/known/emptypb"
)
@@ -196,7 +196,7 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger

	if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
		(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
		log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
		log.WithField("validatorIndex", valIndex).Infof("Participation flag found")
		resp.Responses = append(resp.Responses,
			&ethpb.DoppelGangerResponse_ValidatorResponse{
				PublicKey: v.PublicKey,
@@ -6,7 +6,6 @@ import (
	"context"
	"fmt"
	"net"
	"net/http"
	"sync"

	"github.com/gorilla/mux"
@@ -15,6 +14,9 @@ import (
	grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -32,24 +34,12 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
	rpcBuilder "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/config"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
	beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
	nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
	beaconv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/beacon"
	debugv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/debug"
	nodev1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/node"
	validatorv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
	validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
	chainSync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
@@ -68,6 +58,24 @@ import (

const attestationBufferSize = 100

var (
	httpRequestLatency = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_latency_seconds",
			Help:    "Latency of HTTP requests in seconds",
			Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
		},
		[]string{"endpoint", "code", "method"},
	)
	httpRequestCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_request_count",
			Help: "Number of HTTP requests",
		},
		[]string{"endpoint", "code", "method"},
	)
)

// Service defining an RPC server for a beacon node.
type Service struct {
	cfg *Config
@@ -79,6 +87,7 @@ type Service struct {
	credentialError      error
	connectedRPCClients  map[net.Addr]bool
	clientConnectionLock sync.Mutex
	validatorServer      *validatorv1alpha1.Server
}

// Config options for the beacon node RPC server.
@@ -187,140 +196,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
	}
	s.grpcServer = grpc.NewServer(opts...)

	return s
}

// paranoid build time check to ensure ChainInfoFetcher implements required interfaces
var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)

func (s *Service) initializeRewardServerRoutes(rewardsServer *rewards.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/sync_committee/{block_id}", rewardsServer.SyncCommitteeRewards).Methods(http.MethodPost)
}

func (s *Service) initializeBuilderServerRoutes(builderServer *rpcBuilder.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)
}

func (s *Service) initializeBlobServerRoutes(blobServer *blob.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blob_sidecars/{block_id}", blobServer.Blobs).Methods(http.MethodGet)
}

func (s *Service) initializeValidatorServerRoutes(validatorServer *validator.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_attestation", validatorServer.GetAggregateAttestation).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/contribution_and_proofs", validatorServer.SubmitContributionAndProofs).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_and_proofs", validatorServer.SubmitAggregateAndProofs).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_contribution", validatorServer.ProduceSyncCommitteeContribution).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_subscriptions", validatorServer.SubmitSyncCommitteeSubscription).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_subscriptions", validatorServer.SubmitBeaconCommitteeSubscription).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/attestation_data", validatorServer.GetAttestationData).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/register_validator", validatorServer.RegisterValidator).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/duties/attester/{epoch}", validatorServer.GetAttesterDuties).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/duties/proposer/{epoch}", validatorServer.GetProposerDuties).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/duties/sync/{epoch}", validatorServer.GetSyncCommitteeDuties).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/prepare_beacon_proposer", validatorServer.PrepareBeaconProposer).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/validator/liveness/{epoch}", validatorServer.GetLiveness).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v2/validator/blocks/{slot}", validatorServer.ProduceBlockV2).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/blinded_blocks/{slot}", validatorServer.ProduceBlindedBlock).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v3/validator/blocks/{slot}", validatorServer.ProduceBlockV3).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_selections", validatorServer.BeaconCommitteeSelections).Methods(http.MethodPost)
}

func (s *Service) initializeNodeServerRoutes(nodeServer *node.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/node/syncing", nodeServer.GetSyncStatus).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/identity", nodeServer.GetIdentity).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/peers/{peer_id}", nodeServer.GetPeer).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/peers", nodeServer.GetPeers).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/peer_count", nodeServer.GetPeerCount).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/version", nodeServer.GetVersion).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/node/health", nodeServer.GetHealth).Methods(http.MethodGet)
}

func (s *Service) initializeBeaconServerRoutes(beaconServer *beacon.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/committees", beaconServer.GetCommittees).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/fork", beaconServer.GetStateFork).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/root", beaconServer.GetStateRoot).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/sync_committees", beaconServer.GetSyncCommittees).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/randao", beaconServer.GetRandao).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks", beaconServer.PublishBlock).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blinded_blocks", beaconServer.PublishBlindedBlock).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconServer.PublishBlockV2).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconServer.PublishBlindedBlockV2).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks/{block_id}", beaconServer.GetBlockV2).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks/{block_id}/attestations", beaconServer.GetBlockAttestations).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blinded_blocks/{block_id}", beaconServer.GetBlindedBlock).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks/{block_id}/root", beaconServer.GetBlockRoot).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/deposit_snapshot", beaconServer.GetDepositSnapshot).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconServer.ListAttestations).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconServer.SubmitAttestations).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/voluntary_exits", beaconServer.ListVoluntaryExits).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/voluntary_exits", beaconServer.SubmitVoluntaryExit).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/sync_committees", beaconServer.SubmitSyncCommitteeSignatures).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/bls_to_execution_changes", beaconServer.ListBLSToExecutionChanges).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/bls_to_execution_changes", beaconServer.SubmitBLSToExecutionChanges).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attester_slashings", beaconServer.GetAttesterSlashings).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attester_slashings", beaconServer.SubmitAttesterSlashing).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/proposer_slashings", beaconServer.GetProposerSlashings).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/proposer_slashings", beaconServer.SubmitProposerSlashing).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/headers", beaconServer.GetBlockHeaders).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/headers/{block_id}", beaconServer.GetBlockHeader).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/genesis", beaconServer.GetGenesis).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/finality_checkpoints", beaconServer.GetFinalityCheckpoints).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators", beaconServer.GetValidators).Methods(http.MethodGet, http.MethodPost)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators/{validator_id}", beaconServer.GetValidator).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_balances", beaconServer.GetValidatorBalances).Methods(http.MethodGet, http.MethodPost)
}

func (s *Service) initializeConfigRoutes() {
	s.cfg.Router.HandleFunc("/eth/v1/config/deposit_contract", config.GetDepositContract).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/config/fork_schedule", config.GetForkSchedule).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/config/spec", config.GetSpec).Methods(http.MethodGet)
}

func (s *Service) initializeEventsServerRoutes(eventsServer *events.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/events", eventsServer.StreamEvents).Methods(http.MethodGet)
}

func (s *Service) initializeLightClientServerRoutes(lightClientServer *lightclient.Server) {
	s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/bootstrap/{block_root}", lightClientServer.GetLightClientBootstrap).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/updates", lightClientServer.GetLightClientUpdatesByRange).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/finality_update", lightClientServer.GetLightClientFinalityUpdate).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/optimistic_update", lightClientServer.GetLightClientOptimisticUpdate).Methods(http.MethodGet)
}

func (s *Service) initializeDebugServerRoutes(debugServer *debug.Server) {
	s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/states/{state_id}", debugServer.GetBeaconStateV2).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/heads", debugServer.GetForkChoiceHeadsV2).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
}

// prysm internal routes
func (s *Service) initializePrysmBeaconServerRoutes(beaconServerPrysm *beaconprysm.Server) {
	s.cfg.Router.HandleFunc("/prysm/v1/beacon/weak_subjectivity", beaconServerPrysm.GetWeakSubjectivity).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_count", beaconServerPrysm.GetValidatorCount).Methods(http.MethodGet) // TODO: deprecate in Swagger, remove in v6
	s.cfg.Router.HandleFunc("/prysm/v1/beacon/states/{state_id}/validator_count", beaconServerPrysm.GetValidatorCount).Methods(http.MethodGet)
}

func (s *Service) initializePrysmNodeServerRoutes(nodeServerPrysm *nodeprysm.Server) {
	s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet) // TODO: deprecate in Swagger, remove in v6
	s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet)
	s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost) // TODO: deprecate in Swagger, remove in v6
	s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost)
	s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete) // TODO: deprecate in Swagger, remove in v6
	s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete)
}

func (s *Service) initializePrysmValidatorServerRoutes(validatorServerPrysm *validatorprysm.Server) {
	s.cfg.Router.HandleFunc("/prysm/validator/performance", validatorServerPrysm.GetValidatorPerformance).Methods(http.MethodPost) // TODO: deprecate in Swagger, remove in v6
	s.cfg.Router.HandleFunc("/prysm/v1/validator/performance", validatorServerPrysm.GetValidatorPerformance).Methods(http.MethodPost)
}

// Start the gRPC server.
func (s *Service) Start() {
	grpcprometheus.EnableHandlingTimeHistogram()

	var stateCache stategen.CachedGetter
	if s.cfg.StateGen != nil {
		stateCache = s.cfg.StateGen.CombinedCache()
@@ -341,25 +216,6 @@ func (s *Service) Start() {
		BlobStorage: s.cfg.BlobStorage,
	}
	rewardFetcher := &rewards.BlockRewardService{Replayer: ch}

	s.initializeRewardServerRoutes(&rewards.Server{
		Blocker:               blocker,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
		FinalizationFetcher:   s.cfg.FinalizationFetcher,
		TimeFetcher:           s.cfg.GenesisTimeFetcher,
		Stater:                stater,
		HeadFetcher:           s.cfg.HeadFetcher,
		BlockRewardFetcher:    rewardFetcher,
	})
	s.initializeBuilderServerRoutes(&rpcBuilder.Server{
		FinalizationFetcher:   s.cfg.FinalizationFetcher,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
		Stater:                stater,
	})
	s.initializeBlobServerRoutes(&blob.Server{
		Blocker: blocker,
	})

	coreService := &core.Service{
		HeadFetcher:        s.cfg.HeadFetcher,
		GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -373,7 +229,6 @@ func (s *Service) Start() {
		FinalizedFetcher:      s.cfg.FinalizationFetcher,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
	}

	validatorServer := &validatorv1alpha1.Server{
		Ctx:     s.ctx,
		AttPool: s.cfg.AttestationsPool,
@@ -412,27 +267,7 @@ func (s *Service) Start() {
		TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
		PayloadIDCache:         s.cfg.PayloadIDCache,
	}
	s.initializeValidatorServerRoutes(&validator.Server{
		HeadFetcher:            s.cfg.HeadFetcher,
		TimeFetcher:            s.cfg.GenesisTimeFetcher,
		SyncChecker:            s.cfg.SyncService,
		OptimisticModeFetcher:  s.cfg.OptimisticModeFetcher,
		AttestationsPool:       s.cfg.AttestationsPool,
		PeerManager:            s.cfg.PeerManager,
		Broadcaster:            s.cfg.Broadcaster,
		V1Alpha1Server:         validatorServer,
		Stater:                 stater,
		SyncCommitteePool:      s.cfg.SyncCommitteeObjectPool,
		ChainInfoFetcher:       s.cfg.ChainInfoFetcher,
		BeaconDB:               s.cfg.BeaconDB,
		BlockBuilder:           s.cfg.BlockBuilder,
		OperationNotifier:      s.cfg.OperationNotifier,
		TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
		PayloadIDCache:         s.cfg.PayloadIDCache,
		CoreService:            coreService,
		BlockRewardFetcher:     rewardFetcher,
	})

	s.validatorServer = validatorServer
	nodeServer := &nodev1alpha1.Server{
		LogsStreamer:         logs.NewStreamServer(),
		StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
@@ -447,19 +282,6 @@ func (s *Service) Start() {
		BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
		BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
	}
	s.initializeNodeServerRoutes(&node.Server{
		BeaconDB:                  s.cfg.BeaconDB,
		Server:                    s.grpcServer,
		SyncChecker:               s.cfg.SyncService,
		OptimisticModeFetcher:     s.cfg.OptimisticModeFetcher,
		GenesisTimeFetcher:        s.cfg.GenesisTimeFetcher,
		PeersFetcher:              s.cfg.PeersFetcher,
		PeerManager:               s.cfg.PeerManager,
		MetadataProvider:          s.cfg.MetadataProvider,
		HeadFetcher:               s.cfg.HeadFetcher,
		ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
	})

	beaconChainServer := &beaconv1alpha1.Server{
		Ctx:      s.ctx,
		BeaconDB: s.cfg.BeaconDB,
@@ -484,47 +306,20 @@ func (s *Service) Start() {
|
||||
ReplayerBuilder: ch,
|
||||
CoreService: coreService,
|
||||
}
|
||||
s.initializeBeaconServerRoutes(&beacon.Server{
|
||||
CanonicalHistory: ch,
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
AttestationsPool: s.cfg.AttestationsPool,
|
||||
SlashingsPool: s.cfg.SlashingsPool,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlockNotifier: s.cfg.BlockNotifier,
|
||||
OperationNotifier: s.cfg.OperationNotifier,
|
||||
Broadcaster: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
StateGenService: s.cfg.StateGen,
|
||||
Stater: stater,
|
||||
Blocker: blocker,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
TimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
VoluntaryExitsPool: s.cfg.ExitPool,
|
||||
V1Alpha1ValidatorServer: validatorServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
|
||||
BLSChangesPool: s.cfg.BLSChangesPool,
|
||||
FinalizationFetcher: s.cfg.FinalizationFetcher,
|
||||
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
|
||||
CoreService: coreService,
|
||||
})
|
||||
|
||||
s.initializeConfigRoutes()
|
||||
|
||||
s.initializeEventsServerRoutes(&events.Server{
|
||||
StateNotifier: s.cfg.StateNotifier,
|
||||
OperationNotifier: s.cfg.OperationNotifier,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
})
|
||||
|
||||
s.initializeLightClientServerRoutes(&lightclient.Server{
|
||||
Blocker: blocker,
|
||||
Stater: stater,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
})
|
||||
endpoints := s.endpoints(s.cfg.EnableDebugRPCEndpoints, blocker, stater, rewardFetcher, validatorServer, coreService, ch)
|
||||
for _, e := range endpoints {
|
||||
s.cfg.Router.HandleFunc(
|
||||
e.template,
|
||||
promhttp.InstrumentHandlerDuration(
|
||||
httpRequestLatency.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
|
||||
promhttp.InstrumentHandlerCounter(
|
||||
httpRequestCount.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
|
||||
e.handler,
|
||||
),
|
||||
),
|
||||
).Methods(e.methods...)
|
||||
}
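
An aside on the instrumentation pattern in the loop above: each handler is wrapped in a duration histogram and a request counter, with the `endpoint` label curried into shared metric vectors so a single vector serves every route. A minimal, self-contained sketch of the same currying pattern (the metric name and the `/hello` route are illustrative, not Prysm's):

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // One shared counter vector; "endpoint" is curried per route, while
    // "code" and "method" are filled in by the middleware itself.
    requests := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "http_requests_total", Help: "HTTP requests by endpoint."},
        []string{"endpoint", "code", "method"},
    )
    prometheus.MustRegister(requests)

    hello := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        _, _ = w.Write([]byte("ok"))
    })
    http.Handle("/hello", promhttp.InstrumentHandlerCounter(
        requests.MustCurryWith(prometheus.Labels{"endpoint": "hello"}),
        hello,
    ))
    http.Handle("/metrics", promhttp.Handler())
    _ = http.ListenAndServe(":8080", nil)
}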

    ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
    ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
@@ -540,50 +335,23 @@ func (s *Service) Start() {
        PeersFetcher: s.cfg.PeersFetcher,
        ReplayerBuilder: ch,
    }
    s.initializeDebugServerRoutes(&debug.Server{
        BeaconDB: s.cfg.BeaconDB,
        HeadFetcher: s.cfg.HeadFetcher,
        Stater: stater,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        ForkFetcher: s.cfg.ForkFetcher,
        ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
    })
    ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
    }
    ethpbv1alpha1.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
    // Register reflection service on gRPC server.
    reflection.Register(s.grpcServer)

    validatorServer.PruneBlobsBundleCacheRoutine()
    return s
}

    s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{
        SyncChecker: s.cfg.SyncService,
        HeadFetcher: s.cfg.HeadFetcher,
        TimeFetcher: s.cfg.GenesisTimeFetcher,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        CanonicalHistory: ch,
        BeaconDB: s.cfg.BeaconDB,
        Stater: stater,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
    })

    s.initializePrysmNodeServerRoutes(&nodeprysm.Server{
        BeaconDB: s.cfg.BeaconDB,
        SyncChecker: s.cfg.SyncService,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        PeersFetcher: s.cfg.PeersFetcher,
        PeerManager: s.cfg.PeerManager,
        MetadataProvider: s.cfg.MetadataProvider,
        HeadFetcher: s.cfg.HeadFetcher,
        ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
    })

    s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{CoreService: coreService})
    // paranoid build time check to ensure ChainInfoFetcher implements required interfaces
    var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
    var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)

// Start the gRPC server.
func (s *Service) Start() {
    grpcprometheus.EnableHandlingTimeHistogram()
    s.validatorServer.PruneBlobsBundleCacheRoutine()
    go func() {
        if s.listener != nil {
            if err := s.grpcServer.Serve(s.listener); err != nil {

@@ -4,25 +4,12 @@ import (
    "context"
    "errors"
    "io"
    "net/http"
    "testing"
    "time"

    "github.com/gorilla/mux"
    mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
    mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
    rpcBuilder "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
    lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
    beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
    nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
    validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    mockSync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync/testing"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -48,148 +35,6 @@ func combineMaps(maps ...map[string][]string) map[string][]string {
    return combinedMap
}

func TestServer_InitializeRoutes(t *testing.T) {
    s := Service{
        cfg: &Config{
            Router: mux.NewRouter(),
        },
    }
    s.initializeRewardServerRoutes(&rewards.Server{})
    s.initializeBuilderServerRoutes(&rpcBuilder.Server{})
    s.initializeBlobServerRoutes(&blob.Server{})
    s.initializeValidatorServerRoutes(&validator.Server{})
    s.initializeNodeServerRoutes(&node.Server{})
    s.initializeBeaconServerRoutes(&beacon.Server{})
    s.initializeConfigRoutes()
    s.initializeEventsServerRoutes(&events.Server{})
    s.initializeLightClientServerRoutes(&lightclient.Server{})
    s.initializeDebugServerRoutes(&debug.Server{})

    // prysm internal
    s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{})
    s.initializePrysmNodeServerRoutes(&nodeprysm.Server{})
    s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{})

    beaconRoutes := map[string][]string{
        "/eth/v1/beacon/genesis": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/finality_checkpoints": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/validators": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/states/{state_id}/validators/{validator_id}": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/validator_balances": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
        "/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
        "/eth/v1/beacon/headers": {http.MethodGet},
        "/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
        "/eth/v1/beacon/blinded_blocks": {http.MethodPost},
        "/eth/v2/beacon/blinded_blocks": {http.MethodPost},
        "/eth/v1/beacon/blocks": {http.MethodPost},
        "/eth/v2/beacon/blocks": {http.MethodPost},
        "/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
        "/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
        "/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
        "/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
        "/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
        "/eth/v1/beacon/deposit_snapshot": {http.MethodGet},
        "/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
        "/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
        "/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
        "/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
        "/eth/v1/beacon/light_client/updates": {http.MethodGet},
        "/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
        "/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
        "/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
        "/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
        "/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
    }

    builderRoutes := map[string][]string{
        "/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
    }

    configRoutes := map[string][]string{
        "/eth/v1/config/fork_schedule": {http.MethodGet},
        "/eth/v1/config/spec": {http.MethodGet},
        "/eth/v1/config/deposit_contract": {http.MethodGet},
    }

    debugRoutes := map[string][]string{
        "/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
        "/eth/v2/debug/beacon/heads": {http.MethodGet},
        "/eth/v1/debug/fork_choice": {http.MethodGet},
    }

    eventsRoutes := map[string][]string{
        "/eth/v1/events": {http.MethodGet},
    }

    nodeRoutes := map[string][]string{
        "/eth/v1/node/identity": {http.MethodGet},
        "/eth/v1/node/peers": {http.MethodGet},
        "/eth/v1/node/peers/{peer_id}": {http.MethodGet},
        "/eth/v1/node/peer_count": {http.MethodGet},
        "/eth/v1/node/version": {http.MethodGet},
        "/eth/v1/node/syncing": {http.MethodGet},
        "/eth/v1/node/health": {http.MethodGet},
    }

    validatorRoutes := map[string][]string{
        "/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
        "/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
        "/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
        "/eth/v2/validator/blocks/{slot}": {http.MethodGet}, // deprecated
        "/eth/v3/validator/blocks/{slot}": {http.MethodGet},
        "/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet}, // deprecated
        "/eth/v1/validator/attestation_data": {http.MethodGet},
        "/eth/v1/validator/aggregate_attestation": {http.MethodGet},
        "/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
        "/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
        "/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
        "/eth/v1/validator/beacon_committee_selections": {http.MethodPost},
        "/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
        //"/eth/v1/validator/sync_committee_selections": {http.MethodPost}, // not implemented
        "/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
        "/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
        "/eth/v1/validator/register_validator": {http.MethodPost},
        "/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
    }

    prysmCustomRoutes := map[string][]string{
        "/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
        "/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
        "/prysm/v1/node/trusted_peers": {http.MethodGet, http.MethodPost},
        "/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
        "/prysm/v1/node/trusted_peers/{peer_id}": {http.MethodDelete},
        "/prysm/validator/performance": {http.MethodPost},
        "/prysm/v1/validator/performance": {http.MethodPost},
        "/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
        "/prysm/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
    }

    wantRouteList := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, prysmCustomRoutes)
    gotRouteList := make(map[string][]string)
    err := s.cfg.Router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
        tpl, err1 := route.GetPathTemplate()
        require.NoError(t, err1)
        met, err2 := route.GetMethods()
        require.NoError(t, err2)
        methods, ok := gotRouteList[tpl]
        if !ok {
            gotRouteList[tpl] = met
        } else {
            gotRouteList[tpl] = append(methods, met...)
        }
        return nil
    })
    require.NoError(t, err)
    require.DeepEqual(t, wantRouteList, gotRouteList)
}

func TestLifecycle_OK(t *testing.T) {
    hook := logTest.NewGlobal()
    chainService := &mock.ChainService{

@@ -4,14 +4,12 @@ import (
    "bytes"
    "context"
    "fmt"
    "time"

    "github.com/pkg/errors"
    slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

@@ -20,8 +18,6 @@ import (
func (s *Service) checkSlashableAttestations(
    ctx context.Context, currentEpoch primitives.Epoch, atts []*slashertypes.IndexedAttestationWrapper,
) (map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing, error) {
    start := time.Now()

    slashings := map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing{}

    // Double votes
@@ -30,8 +26,6 @@ func (s *Service) checkSlashableAttestations(
        return nil, errors.Wrap(err, "could not check slashable double votes")
    }

    log.WithField("elapsed", time.Since(start)).Debug("Done checking double votes")

    for root, slashing := range doubleVoteSlashings {
        slashings[root] = slashing
    }
@@ -53,19 +47,6 @@ func (s *Service) checkSlashableAttestations(
        slashings[root] = slashing
    }

    elapsed := time.Since(start)

    fields := logrus.Fields{
        "numAttestations": len(atts),
        "elapsed": elapsed,
    }

    log.WithFields(fields).Info("Done checking slashable attestations")

    if len(slashings) > 0 {
        log.WithField("numSlashings", len(slashings)).Warn("Slashable attestation offenses found")
    }

    return slashings, nil
}

@@ -75,10 +56,21 @@ func (s *Service) checkSurroundVotes(
    attWrappers []*slashertypes.IndexedAttestationWrapper,
    currentEpoch primitives.Epoch,
) (map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing, error) {
    // With 256 validators and 16 epochs per chunk, there are 4096 `uint16` elements per chunk.
    // 4096 `uint16` elements = 8192 bytes = 8KB
    // 25_600 chunks * 8KB = 200MB
    const maxChunkBeforeFlush = 25_600

    slashings := map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing{}

    // Group attestation wrappers by validator chunk index.
    attWrappersByValidatorChunkIndex := s.groupByValidatorChunkIndex(attWrappers)
    attWrappersByValidatorChunkIndexCount := len(attWrappersByValidatorChunkIndex)

    minChunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
    maxChunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)

    chunksCounts := 0

    for validatorChunkIndex, attWrappers := range attWrappersByValidatorChunkIndex {
        minChunkByChunkIndex, err := s.updatedChunkByChunkIndex(ctx, slashertypes.MinSpan, currentEpoch, validatorChunkIndex)
@@ -91,6 +83,8 @@ func (s *Service) checkSurroundVotes(
            return nil, errors.Wrap(err, "could not update updatedMaxChunks")
        }

        chunksCounts += len(minChunkByChunkIndex) + len(maxChunkByChunkIndex)

        // Group (already grouped by validator chunk index) attestation wrappers by chunk index.
        attWrappersByChunkIndex := s.groupByChunkIndex(attWrappers)

@@ -114,20 +108,42 @@ func (s *Service) checkSurroundVotes(
            slashings[root] = slashing
        }

        // Save updated chunks into the database.
        if err := s.saveUpdatedChunks(ctx, minChunkByChunkIndex, slashertypes.MinSpan, validatorChunkIndex); err != nil {
            return nil, errors.Wrap(err, "could not save chunks for min spans")
        // Memoize the updated chunks for the current validator chunk index.
        minChunkByChunkIndexByValidatorChunkIndex[validatorChunkIndex] = minChunkByChunkIndex
        maxChunkByChunkIndexByValidatorChunkIndex[validatorChunkIndex] = maxChunkByChunkIndex

        if chunksCounts >= maxChunkBeforeFlush {
            // Save the updated chunks to disk if we have reached the maximum number of chunks to store in memory.
            if err := s.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkByChunkIndexByValidatorChunkIndex); err != nil {
                return nil, errors.Wrap(err, "could not save updated min chunks to disk")
            }

            if err := s.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkByChunkIndexByValidatorChunkIndex); err != nil {
                return nil, errors.Wrap(err, "could not save updated max chunks to disk")
            }

            // Reset the chunks counts.
            chunksCounts = 0

            // Reset memoized chunks.
            minChunkByChunkIndexByValidatorChunkIndex = make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
            maxChunkByChunkIndexByValidatorChunkIndex = make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
        }

        if err := s.saveUpdatedChunks(ctx, maxChunkByChunkIndex, slashertypes.MaxSpan, validatorChunkIndex); err != nil {
            return nil, errors.Wrap(err, "could not save chunks for max spans")
        // Update the latest updated epoch for all validators involved to the current chunk.
        indexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
        for _, index := range indexes {
            s.latestEpochUpdatedForValidator[index] = currentEpoch
        }
    }

    // Update the latest written epoch for all validators involved to the current chunk.
    indices := s.params.validatorIndexesInChunk(validatorChunkIndex)
    for _, idx := range indices {
        s.latestEpochWrittenForValidator[idx] = currentEpoch
    }
    // Save the updated chunks to disk.
    if err := s.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkByChunkIndexByValidatorChunkIndex); err != nil {
        return nil, errors.Wrap(err, "could not save updated min chunks to disk")
    }

    if err := s.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkByChunkIndexByValidatorChunkIndex); err != nil {
        return nil, errors.Wrap(err, "could not save updated max chunks to disk")
    }

    return slashings, nil
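
A note on the `maxChunkBeforeFlush` threshold above: with the default parameters of 256 validators per chunk and 16 epochs per chunk, each chunk holds 4096 uint16 values, i.e. 8 KB, so buffering 25,600 chunks caps the memoized spans at roughly 200 MB before a flush. A back-of-the-envelope sketch of that budget (the constants simply restate the comment above; nothing here is Prysm API):

package main

import "fmt"

func main() {
    const (
        validatorsPerChunk  = 256
        epochsPerChunk      = 16
        bytesPerElement     = 2 // each span entry is a uint16
        maxChunkBeforeFlush = 25_600
    )
    chunkBytes := validatorsPerChunk * epochsPerChunk * bytesPerElement // 8192 bytes = 8 KB
    budgetBytes := maxChunkBeforeFlush * chunkBytes                     // 209_715_200 bytes ≈ 200 MB
    fmt.Printf("chunk: %d bytes, flush budget: %d bytes\n", chunkBytes, budgetBytes)
}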
@@ -239,7 +255,7 @@ func (s *Service) checkDoubleVotes(
// updatedChunkByChunkIndex loads the chunks from the database for validators corresponding to
// the `validatorChunkIndex`.
// It then updates the chunks with the neutral element for corresponding validators from
// the epoch just after the latest epoch written to the current epoch.
// the epoch just after the latest updated epoch to the current epoch.
// A mapping between chunk index and chunk is returned to the caller.
func (s *Service) updatedChunkByChunkIndex(
    ctx context.Context,
@@ -247,56 +263,97 @@ func (s *Service) updatedChunkByChunkIndex(
    currentEpoch primitives.Epoch,
    validatorChunkIndex uint64,
) (map[uint64]Chunker, error) {
    chunkByChunkIndex := map[uint64]Chunker{}
    // Every validator may have a first epoch to update.
    // For a given validator,
    // - If it has no latest updated epoch, then the first epoch to update is set to 0.
    // - If the latest updated epoch is the current epoch, then there is no epoch to update.
    //   Thus, there is no first epoch to update.
    // - In all other cases, the first epoch to update is the latest updated epoch + 1.

    // minFirstEpochToUpdate is set to the smallest first epoch to update for all validators in the chunk
    // corresponding to the `validatorChunkIndex`.
    var minFirstEpochToUpdate *primitives.Epoch

    neededChunkIndexesMap := map[uint64]bool{}

    validatorIndexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
    for _, validatorIndex := range validatorIndexes {
        // Retrieve the latest epoch written for the validator.
        latestEpochWritten, ok := s.latestEpochWrittenForValidator[validatorIndex]

        // Start from the epoch just after the latest epoch written.
        epochToWrite, err := latestEpochWritten.SafeAdd(1)
        // Retrieve the first epoch to write for the validator index.
        isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
        if err != nil {
            return nil, errors.Wrap(err, "could not add 1 to latest epoch written")
            return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
        }

        if !ok {
            epochToWrite = 0
        if !isAnEpochToUpdate {
            // If there is no epoch to write, skip.
            continue
        }

        // It is useless to update more than `historyLength` epochs, since
        // the chunks are circular and we will overwrite at least once.
        if currentEpoch-epochToWrite >= s.params.historyLength {
            epochToWrite = currentEpoch + 1 - s.params.historyLength
        // If, for this validator index, the chunks corresponding to the first epoch to update
        // (and all following epochs up to the current epoch) are already flagged as needed,
        // skip.
        if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
            continue
        }

        for epochToWrite <= currentEpoch {
            // Get the chunk index for the latest epoch written.
            chunkIndex := s.params.chunkIndex(epochToWrite)
        minFirstEpochToUpdate = &firstEpochToUpdate

        // Add new needed chunk indexes to the map.
        for i := firstEpochToUpdate; i <= currentEpoch; i++ {
            chunkIndex := s.params.chunkIndex(i)
            neededChunkIndexesMap[chunkIndex] = true
        }
    }

    // Get the list of needed chunk indexes.
    neededChunkIndexes := make([]uint64, 0, len(neededChunkIndexesMap))
    for chunkIndex := range neededChunkIndexesMap {
        neededChunkIndexes = append(neededChunkIndexes, chunkIndex)
    }

    // Retrieve needed chunks from the database.
    chunkByChunkIndex, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, neededChunkIndexes)
    if err != nil {
        return nil, errors.Wrap(err, "could not load chunks from disk")
    }

    for _, validatorIndex := range validatorIndexes {
        // Retrieve the first epoch to write for the validator index.
        isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
        if err != nil {
            return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
        }

        if !isAnEpochToUpdate {
            // If there is no epoch to write, skip.
            continue
        }

        epochToUpdate := firstEpochToUpdate

        for epochToUpdate <= currentEpoch {
            // Get the chunk index for the epoch to write.
            chunkIndex := s.params.chunkIndex(epochToUpdate)

            // Get the chunk corresponding to the chunk index from the `chunkByChunkIndex` map.
            currentChunk, ok := chunkByChunkIndex[chunkIndex]
            if !ok {
                // If the chunk is not in the map, retrieve it from the database.
                currentChunk, err = s.getChunkFromDatabase(ctx, chunkKind, validatorChunkIndex, chunkIndex)
                if err != nil {
                    return nil, errors.Wrap(err, "could not get chunk")
                }
                return nil, errors.Errorf("chunk at index %d does not exist", chunkIndex)
            }

            // Update the current chunk with the neutral element for the validator index for the latest epoch written.
            for s.params.chunkIndex(epochToWrite) == chunkIndex && epochToWrite <= currentEpoch {
            // Update the current chunk with the neutral element for the validator index for the epoch to write.
            for s.params.chunkIndex(epochToUpdate) == chunkIndex && epochToUpdate <= currentEpoch {
                if err := setChunkRawDistance(
                    s.params,
                    currentChunk.Chunk(),
                    validatorIndex,
                    epochToWrite,
                    epochToUpdate,
                    currentChunk.NeutralElement(),
                ); err != nil {
                    return nil, err
                }

                epochToWrite++
                epochToUpdate++
            }

            chunkByChunkIndex[chunkIndex] = currentChunk
@@ -306,6 +363,40 @@ func (s *Service) updatedChunkByChunkIndex(
    return chunkByChunkIndex, nil
}

// firstEpochToUpdate, given a validator index and the current epoch, returns a boolean indicating
// if there is an epoch to write. If it is the case, it returns the first epoch to write.
func (s *Service) firstEpochToUpdate(validatorIndex primitives.ValidatorIndex, currentEpoch primitives.Epoch) (bool, primitives.Epoch, error) {
    latestEpochUpdated, ok := s.latestEpochUpdatedForValidator[validatorIndex]

    // Start from the epoch just after the latest updated epoch.
    epochToUpdate, err := latestEpochUpdated.SafeAdd(1)
    if err != nil {
        return false, primitives.Epoch(0), errors.Wrap(err, "could not add 1 to latest updated epoch")
    }

    if !ok {
        epochToUpdate = 0
    }

    if latestEpochUpdated == currentEpoch {
        // If the latest updated epoch is the current epoch, we do not need to update anything.
        return false, primitives.Epoch(0), nil
    }

    // The latest updated epoch should not be greater than the current epoch.
    if latestEpochUpdated > currentEpoch {
        return false, primitives.Epoch(0), errors.Errorf("epoch to write `%d` should not be greater than the current epoch `%d`", epochToUpdate, currentEpoch)
    }

    // It is useless to update more than `historyLength` epochs, since
    // the chunks are circular and we will overwrite at least once.
    if currentEpoch-epochToUpdate >= s.params.historyLength {
        epochToUpdate = currentEpoch + 1 - s.params.historyLength
    }

    return true, epochToUpdate, nil
}
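
The rules implemented by `firstEpochToUpdate` condense to a small pure function. A standalone sketch under simplifying assumptions (plain uint64 epochs instead of primitives.Epoch, and an explicit hasHistory flag in place of the map lookup; it mirrors, but is not, the code above, and omits the latest > current error path):

// firstEpochToUpdateSketch: with no history start at 0 (a Go map's zero value
// on a missing key), an up-to-date validator needs nothing, otherwise start
// just after the latest update; the window is clamped so at most
// historyLength epochs are rewritten, since the chunks are circular.
func firstEpochToUpdateSketch(latest uint64, hasHistory bool, current, historyLength uint64) (bool, uint64) {
    first := latest + 1 // start just after the latest updated epoch
    if !hasHistory {
        first = 0 // never updated: start from genesis
    }
    if latest == current {
        return false, 0 // already up to date; nothing to update
    }
    // Rewriting more than historyLength epochs is wasted work.
    if current-first >= historyLength {
        first = current + 1 - historyLength
    }
    return true, first
}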

// Updates spans and detects any slashable attester offenses along the way.
// 1. Determine the chunks we need to use for updating for the validator indices
//    in a validator chunk index, then retrieve those chunks from the database.
@@ -487,7 +578,7 @@ func (s *Service) getChunkFromDatabase(
    chunkIndex uint64,
) (Chunker, error) {
    // We can ensure we load the appropriate chunk we need by fetching from the DB.
    diskChunks, err := s.loadChunks(ctx, validatorChunkIndex, chunkKind, []uint64{chunkIndex})
    diskChunks, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, []uint64{chunkIndex})
    if err != nil {
        return nil, errors.Wrapf(err, "could not load chunk at index %d", chunkIndex)
    }
@@ -502,7 +593,7 @@ func (s *Service) getChunkFromDatabase(
// Load chunks for a specified list of chunk indices. We attempt to load it from the database.
// If the data exists, then we initialize a chunk of a specified kind. Otherwise, we create
// an empty chunk, add it to our map, and then return it to the caller.
func (s *Service) loadChunks(
func (s *Service) loadChunksFromDisk(
    ctx context.Context,
    validatorChunkIndex uint64,
    chunkKind slashertypes.ChunkKind,
@@ -511,17 +602,36 @@ func (s *Service) loadChunks(
    ctx, span := trace.StartSpan(ctx, "Slasher.loadChunks")
    defer span.End()

    chunkKeys := make([][]byte, 0, len(chunkIndexes))
    for _, chunkIndex := range chunkIndexes {
        chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIndex))
    chunksCount := len(chunkIndexes)

    if chunksCount == 0 {
        return map[uint64]Chunker{}, nil
    }

    // Build chunk keys.
    chunkKeys := make([][]byte, 0, chunksCount)
    for _, chunkIndex := range chunkIndexes {
        chunkKey := s.params.flatSliceID(validatorChunkIndex, chunkIndex)
        chunkKeys = append(chunkKeys, chunkKey)
    }

    // Load the chunks from the database.
    rawChunks, chunksExist, err := s.serviceCfg.Database.LoadSlasherChunks(ctx, chunkKind, chunkKeys)
    if err != nil {
        return nil, errors.Wrapf(err, "could not load slasher chunk index")
    }

    chunksByChunkIdx := make(map[uint64]Chunker, len(rawChunks))
    // Perform basic checks.
    if len(rawChunks) != chunksCount {
        return nil, errors.Errorf("expected %d chunks, got %d", chunksCount, len(rawChunks))
    }

    if len(chunksExist) != chunksCount {
        return nil, errors.Errorf("expected %d chunks exist, got %d", chunksCount, len(chunksExist))
    }

    // Initialize the chunks.
    chunksByChunkIdx := make(map[uint64]Chunker, chunksCount)
    for i := 0; i < len(rawChunks); i++ {
        // If the chunk exists in the database, we initialize it from the raw bytes data.
        // If it does not exist, we initialize an empty chunk.
@@ -558,21 +668,35 @@ func (s *Service) loadChunks(
    return chunksByChunkIdx, nil
}
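
The chunk keys built above come from `flatSliceID`, which collapses the (validator chunk index, chunk index) pair into a single identifier per the 2D chunking layout. A sketch of the arithmetic, assuming a row-major flattening of validatorChunkIndex*chunkCount + chunkIndex (an assumption for illustration; the real `flatSliceID` also serializes the result to bytes and may differ in detail):

// flatKeySketch flattens a 2D chunk coordinate into one index, so all chunks
// belonging to one validator chunk sit contiguously in the key space.
func flatKeySketch(validatorChunkIndex, chunkIndex, chunkCount uint64) uint64 {
    return validatorChunkIndex*chunkCount + chunkIndex
}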

// Saves updated chunks to disk given the required database schema.
func (s *Service) saveUpdatedChunks(
func (s *Service) saveChunksToDisk(
    ctx context.Context,
    updatedChunksByChunkIdx map[uint64]Chunker,
    chunkKind slashertypes.ChunkKind,
    validatorChunkIndex uint64,
    chunkByChunkIndexByValidatorChunkIndex map[uint64]map[uint64]Chunker,
) error {
    ctx, span := trace.StartSpan(ctx, "Slasher.saveUpdatedChunks")
    ctx, span := trace.StartSpan(ctx, "Slasher.saveChunksToDisk")
    defer span.End()
    chunkKeys := make([][]byte, 0, len(updatedChunksByChunkIdx))
    chunks := make([][]uint16, 0, len(updatedChunksByChunkIdx))
    for chunkIdx, chunk := range updatedChunksByChunkIdx {
        chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIdx))
        chunks = append(chunks, chunk.Chunk())

    // Compute the total number of chunks to save.
    chunksCount := 0
    for _, chunkByChunkIndex := range chunkByChunkIndexByValidatorChunkIndex {
        chunksCount += len(chunkByChunkIndex)
    }
    chunksSavedTotal.Add(float64(len(chunks)))

    // Create needed arrays.
    chunkKeys := make([][]byte, 0, chunksCount)
    chunks := make([][]uint16, 0, chunksCount)

    // Fill the arrays.
    for validatorChunkIndex, chunkByChunkIndex := range chunkByChunkIndexByValidatorChunkIndex {
        for chunkIndex, chunk := range chunkByChunkIndex {
            chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIndex))
            chunks = append(chunks, chunk.Chunk())
        }
    }

    // Update prometheus metrics.
    chunksSavedTotal.Add(float64(chunksCount))

    // Save the chunks to disk.
    return s.serviceCfg.Database.SaveSlasherChunks(ctx, chunkKind, chunkKeys, chunks)
}
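
The new signature batches chunks across validator chunk indexes into one database write instead of one call per validator chunk. A hypothetical call site, shaped like the flush in checkSurroundVotes above (the map literal, indexes 42 and 13, and the error handling are illustrative only):

// Flush a single min-span chunk for validator chunk 42, chunk index 13.
toFlush := map[uint64]map[uint64]Chunker{
    42: {13: &MinSpanChunksSlice{params: s.params, data: make([]uint16, s.params.chunkSize)}},
}
if err := s.saveChunksToDisk(ctx, slashertypes.MinSpan, toFlush); err != nil {
    log.WithError(err).Error("Could not flush slasher chunks")
}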

@@ -3,6 +3,7 @@ package slasher
import (
    "context"
    "fmt"
    "math/rand"
    "testing"
    "time"

@@ -833,7 +834,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) {
    require.LogsDoNotContain(t, hook, "Could not detect")
}

func Test_epochUpdateForValidators(t *testing.T) {
func Test_updatedChunkByChunkIndex(t *testing.T) {
    neutralMin, neutralMax := uint16(65535), uint16(0)

    testCases := []struct {
@@ -1066,7 +1067,7 @@ func Test_epochUpdateForValidators(t *testing.T) {
                    historyLength: tt.historyLength,
                },
                serviceCfg: &ServiceConfig{Database: slasherDB},
                latestEpochWrittenForValidator: tt.latestUpdatedEpochByValidatorIndex,
                latestEpochUpdatedForValidator: tt.latestUpdatedEpochByValidatorIndex,
            }

            // Save min initial chunks if they exist.
@@ -1076,7 +1077,11 @@ func Test_epochUpdateForValidators(t *testing.T) {
                minChunkerByChunkerIndex[chunkIndex] = &MinSpanChunksSlice{data: minChunk}
            }

            err := service.saveUpdatedChunks(ctx, minChunkerByChunkerIndex, slashertypes.MinSpan, tt.validatorChunkIndex)
            minChunkerByChunkerIndexByValidatorChunkerIndex := map[uint64]map[uint64]Chunker{
                tt.validatorChunkIndex: minChunkerByChunkerIndex,
            }

            err := service.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkerByChunkerIndexByValidatorChunkerIndex)
            require.NoError(t, err)
        }

@@ -1087,7 +1092,11 @@ func Test_epochUpdateForValidators(t *testing.T) {
                maxChunkerByChunkerIndex[chunkIndex] = &MaxSpanChunksSlice{data: maxChunk}
            }

            err := service.saveUpdatedChunks(ctx, maxChunkerByChunkerIndex, slashertypes.MaxSpan, tt.validatorChunkIndex)
            maxChunkerByChunkerIndexByValidatorChunkerIndex := map[uint64]map[uint64]Chunker{
                tt.validatorChunkIndex: maxChunkerByChunkerIndex,
            }

            err := service.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkerByChunkerIndexByValidatorChunkerIndex)
            require.NoError(t, err)
        }

@@ -1269,7 +1278,7 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) {
        emptyChunk = EmptyMaxSpanChunksSlice(defaultParams)
    }
    chunkIdx := uint64(2)
    received, err := s.loadChunks(ctx, 0, kind, []uint64{chunkIdx})
    received, err := s.loadChunksFromDisk(ctx, 0, kind, []uint64{chunkIdx})
    require.NoError(t, err)
    wanted := map[uint64]Chunker{
        chunkIdx: emptyChunk,
@@ -1301,15 +1310,15 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) {
        4: existingChunk,
        6: existingChunk,
    }
    err = s.saveUpdatedChunks(
        ctx,
        updatedChunks,
        kind,
        0, // validatorChunkIndex
    )

    chunkByChunkIndexByValidatorChunkIndex := map[uint64]map[uint64]Chunker{
        0: updatedChunks,
    }

    err = s.saveChunksToDisk(ctx, kind, chunkByChunkIndexByValidatorChunkIndex)
    require.NoError(t, err)
    // Check if the retrieved chunks match what we just saved to disk.
    received, err = s.loadChunks(ctx, 0, kind, []uint64{2, 4, 6})
    received, err = s.loadChunksFromDisk(ctx, 0, kind, []uint64{2, 4, 6})
    require.NoError(t, err)
    require.DeepEqual(t, updatedChunks, received)
}
@@ -1351,7 +1360,54 @@ func TestService_processQueuedAttestations(t *testing.T) {
    tickerChan <- 1
    cancel()
    s.wg.Wait()
    assert.LogsContain(t, hook, "Processing queued")
    assert.LogsContain(t, hook, "Start processing queued attestations")
    assert.LogsContain(t, hook, "Done processing queued attestations")
}

func Benchmark_saveChunksToDisk(b *testing.B) {
    // Define the parameters.
    const (
        chunkKind = slashertypes.MinSpan
        validatorsChunksCount = 6000 // Corresponds to 1_536_000 validators / 256 validators per chunk
        chunkIndex uint64 = 13
        validatorChunkIndex uint64 = 42
    )

    params := DefaultParams()

    // Get a context.
    ctx := context.Background()

    chunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, validatorsChunksCount)

    // Populate the chunkers.
    for i := 0; i < validatorsChunksCount; i++ {
        data := make([]uint16, params.chunkSize)
        for j := 0; j < int(params.chunkSize); j++ {
            data[j] = uint16(rand.Intn(1 << 16))
        }

        chunker := map[uint64]Chunker{chunkIndex: &MinSpanChunksSlice{params: params, data: data}}
        chunkByChunkIndexByValidatorChunkIndex[uint64(i)] = chunker
    }

    // Initialize the slasher database.
    slasherDB := dbtest.SetupSlasherDB(b)

    // Initialize the slasher service.
    service, err := New(ctx, &ServiceConfig{Database: slasherDB})
    require.NoError(b, err)

    // Reset the benchmark timer.
    b.ResetTimer()

    // Run the benchmark.
    for i := 0; i < b.N; i++ {
        b.StartTimer()
        err = service.saveChunksToDisk(ctx, slashertypes.MinSpan, chunkByChunkIndexByValidatorChunkIndex)
        b.StopTimer()
        require.NoError(b, err)
    }
}

func BenchmarkCheckSlashableAttestations(b *testing.B) {
@@ -1444,6 +1500,66 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
    }
}

func Benchmark_checkSurroundVotes(b *testing.B) {
    const (
        // Approximately the number of Holesky active validators on 2024-02-16.
        // This number is a multiple of both 32 (the number of slots per epoch) and 256 (the number of validators per chunk).
        validatorsCount = 1_638_400
        slotsPerEpoch = 32

        targetEpoch = 42
        sourceEpoch = 43
        currentEpoch = 43
    )
    // Create a context.
    ctx := context.Background()

    // Initialize the slasher database.
    slasherDB := dbtest.SetupSlasherDB(b)

    // Initialize the slasher service.
    service, err := New(ctx, &ServiceConfig{Database: slasherDB})
    require.NoError(b, err)

    // Create the attesting validators indexes.
    // The best case scenario would be to have all validators attesting for a slot with contiguous indexes.
    // So for 1_638_400 validators with 32 slots per epoch, we would have 51_200 attestation wrappers per slot.
    // With 256 validators per chunk, we would have only 200 modified chunks.
    //
    // In this benchmark, we use the worst case scenario where attesting validators are evenly split across all validator chunks.
    // We also suppose that only one chunk per validator chunk index is modified.
    // For one given validator index, multiple chunk indexes could be modified.
    //
    // With 1_638_400 validators we have 6_400 chunks. If exactly 8 validators per chunk attest, we have:
    // 6_400 chunks * 8 = 51_200 validators attesting per slot. And 51_200 validators * 32 slots = 1_638_400
    // attesting validators per epoch.
    // ==> Attesting validator indexes will be computed as follows:
    //  validator chunk index 0                validator chunk index 1                       validator chunk index 6_399
    // [0, 32, 64, 96, 128, 160, 192, 224 | 256, 288, 320, 352, 384, 416, 448, 480 | ... | ..., 1_638_304, 1_638_336, 1_638_368]
    //
    attestingValidatorsCount := validatorsCount / slotsPerEpoch
    validatorIndexes := make([]uint64, attestingValidatorsCount)
    for i := 0; i < attestingValidatorsCount; i++ {
        validatorIndexes[i] = 32 * uint64(i)
    }

    // Create the attestation wrapper.
    // This benchmark assumes that all validators produced the exact same head, source and target votes.
    attWrapper := createAttestationWrapperEmptySig(b, sourceEpoch, targetEpoch, validatorIndexes, nil)
    attWrappers := []*slashertypes.IndexedAttestationWrapper{attWrapper}

    // Run the benchmark.
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        b.StartTimer()
        _, err = service.checkSurroundVotes(ctx, attWrappers, currentEpoch)
        b.StopTimer()

        require.NoError(b, err)
    }
}

// createAttestationWrapperEmptySig creates an attestation wrapper with source and target,
// for validators with indices, and a beacon block root (corresponding to the head vote).
// For source and target epochs, the corresponding root is null.

@@ -1,45 +1,142 @@
// nolint:dupword
//
// Package slasher defines an optimized implementation of Ethereum proof-of-stake slashing
// detection, namely focused on catching "surround vote" slashable
// offenses as explained here: https://blog.ethereum.org/2020/01/13/validated-staking-on-eth2-1-incentives/.
//
// Surround vote detection is a difficult problem if done naively, as slasher
// needs to keep track of every single attestation by every single validator
// in the network and be ready to efficiently detect whether incoming attestations
// are slashable with respect to older ones. To do this, the Sigma Prime team
// created an elaborate design document: https://hackmd.io/@sproul/min-max-slasher
// offering an optimal solution.
//
// Attesting histories are kept for each validator in two separate arrays known
// as min and max spans, which are explained in our design document:
// https://hackmd.io/@prysmaticlabs/slasher.
//
// A regular pair of min and max spans for a validator look as follows
// with length = H where H is the amount of epochs worth of history
// we want to persist for slashing detection.
//
// validator_1_min_span = [2, 2, 2, ..., 2]
// validator_1_max_span = [0, 0, 0, ..., 0]
//
// Instead of always dealing with length H arrays, which can be prohibitively
// expensive to handle in memory, we split these arrays into chunks of length C.
// For C = 3, for example, the 0th chunk of validator 1's min and max spans would look
// as follows:
//
// validator_1_min_span_chunk_0 = [2, 2, 2]
// validator_1_max_span_chunk_0 = [2, 2, 2]
//
// Next, on disk, we take chunks for K validators, and store them as flat slices.
// For example, if H = 3, C = 3, and K = 3, then we can store 3 validators' chunks as a flat
// slice as follows:
//
//	   val0     val1     val2
//	    |        |        |
//	{       }{       }{       }
//	[2, 2, 2, 2, 2, 2, 2, 2, 2]
//
// This is known as 2D chunking, pioneered by the Sigma Prime team here:
// https://hackmd.io/@sproul/min-max-slasher. The parameters H, C, and K will be
// used extensively throughout this package.
/*
Package slasher defines an optimized implementation of Ethereum proof-of-stake slashing
detection, namely focused on catching "surround vote" slashable
offenses as explained here: https://blog.ethereum.org/2020/01/13/validated-staking-on-eth2-1-incentives/.

Surround vote detection is a difficult problem if done naively, as slasher
needs to keep track of every single attestation by every single validator
in the network and be ready to efficiently detect whether incoming attestations
are slashable with respect to older ones. To do this, the Sigma Prime team
created an elaborate design document: https://hackmd.io/@sproul/min-max-slasher
offering an optimal solution.

Attesting histories are kept for each validator in two separate arrays known
as min and max spans, which are explained in our design document:
https://hackmd.io/@prysmaticlabs/slasher.

This is known as 2D chunking, pioneered by the Sigma Prime team here:
https://hackmd.io/@sproul/min-max-slasher. The parameters H, C, and K will be
used extensively throughout this package.

Attestations are represented as follows: `<source epoch>====><target epoch>`
N: Number of epochs worth of history we want to keep for each validator.

In the following example:
- N = 4096
- Validators 257 and 258 have some attestations
- All other validators have no attestations

For MIN SPAN, `∞` is actually set to the max `uint16` value: 65535

validator 257 : 8193=======>8195 8196=>8197=============>8200 8204=>8205=>8206=>8207=========>8209=>8210=>8211=>8212=>8213=>8214 8219=>8220 8221=>8222
validator 258 : 8193=======>8196=>8197=>8198=>8199=>8200=>8201=======>8203=>8204=>8205=>8206=>8207===>8208=>8209=>8210=>8211=>8212=>8213=>8214=>8215=>8216=>8217=>8218=>8219=>8220=>8221
/----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\
| MIN SPAN | kN+0 kN+1 kN+2 kN+3 kN+4 kN+5 kN+6 kN+7 kN+8 kN+9 kN+10 kN+11 kN+12 kN+13 kN+14 kN+15 | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 | ... | (k+1)N-16 (k+1)N-15 (k+1)N-14 (k+1)N-13 (k+1)N-12 (k+1)N-11 (k+1)N-10 (k+1)N-9 (k+1)N-8 (k+1)N-7 (k+1)N-6 (k+1)N-5 (k+1)N-4 (k+1)N-3 (k+1)N-2 (k+1)N-1 |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 0 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 1 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 2 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 254 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 255 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 256 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 257 | 3 4 3 2 4 8 7 6 5 4 3 2 2 2 3 3 | 2 2 2 2 2 7 6 5 4 3 2 3 2 ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 258 | 4 3 3 2 2 2 2 2 3 3 2 2 2 2 2 2 | 2 2 2 2 2 2 2 2 2 2 2 2 ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 510 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 511 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator M - 256 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator M - 255 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator M - 1 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
\----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------/
/----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\
| MAX SPAN | kN+0 kN+1 kN+2 kN+3 kN+4 kN+5 kN+6 kN+7 kN+8 kN+9 kN+10 kN+11 kN+12 kN+13 kN+14 kN+15 | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 | ... | (k+1)N-16 (k+1)N-15 (k+1)N-14 (k+1)N-13 (k+1)N-12 (k+1)N-11 (k+1)N-10 (k+1)N-9 (k+1)N-8 (k+1)N-7 (k+1)N-6 (k+1)N-5 (k+1)N-4 (k+1)N-3 (k+1)N-2 (k+1)N-1 |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 1 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 2 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 14 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 15 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 256 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 257 | 0 0 1 0 0 0 2 1 0 0 0 0 0 0 0 0 | 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 258 | 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 510 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 511 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
|
||||
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| validator M - 256 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
|
||||
| validator M - 255 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
|
||||
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
|
||||
| validator M - 1 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
|
||||
\ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------/
|
||||
|
||||
How do we know whether an incoming attestation will surround a pre-existing one?
---------------------------------------------------------------------------------

Example with an incoming attestation 8197====>8199 for validator 257.

- First, we retrieve the MIN SPAN value for the source epoch of the incoming attestation (here the source epoch is 8197). We get the value 8.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8197 = 2.
- 8 >= 2, so the incoming attestation will NOT surround any pre-existing one.

Example with an incoming attestation 8202====>8206 for validator 257.

- First, we retrieve the MIN SPAN value for the source epoch of the incoming attestation (here the source epoch is 8202). We get the value 3.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8206 - 8202 = 4.
- 3 < 4, so the incoming attestation WILL surround a pre-existing one. (In this precise case, it surrounds 8204=>8205.)
How do we know whether an incoming attestation will be surrounded by a pre-existing one?
-----------------------------------------------------------------------------------------

Example with an incoming attestation 8197====>8199 for validator 257.

- First, we retrieve the MAX SPAN value for the source epoch of the incoming attestation (here the source epoch is 8197). We get the value 0.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8197 = 2.
- 0 <= 2, so the incoming attestation will NOT be surrounded by any pre-existing one.

Example with an incoming attestation 8198====>8199 for validator 257.

- First, we retrieve the MAX SPAN value for the source epoch of the incoming attestation (here the source epoch is 8198). We get the value 2.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8198 = 1.
- 2 > 1, so the incoming attestation WILL be surrounded by a pre-existing one. (In this precise case, it is surrounded by 8197=>8200.)
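To make the two rules above concrete, here is a minimal Go sketch of both checks. The minSpanAtSource and maxSpanAtSource inputs stand in for the chunk lookups described below; this is an illustration of the arithmetic only, not the actual slasher API.

	// surrounds reports whether an incoming attestation (source, target) would
	// surround a pre-existing one, given the MIN SPAN value stored for the
	// validator at the incoming attestation's source epoch.
	func surrounds(minSpanAtSource, source, target uint64) bool {
		return target-source > minSpanAtSource
	}

	// surroundedBy reports whether an incoming attestation (source, target) would
	// be surrounded by a pre-existing one, given the MAX SPAN value stored for
	// the validator at the incoming attestation's source epoch.
	func surroundedBy(maxSpanAtSource, source, target uint64) bool {
		return target-source < maxSpanAtSource
	}

For example, surrounds(3, 8202, 8206) is true (4 > 3), matching the second MIN SPAN example, and surroundedBy(2, 8198, 8199) is true (1 < 2), matching the second MAX SPAN example.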
Data is stored on disk in chunks.

For example, for MIN SPAN, validators 256 to 511 inclusive and epochs 8208 to 8223 inclusive, the corresponding chunk is:

/---------------------------------------------------------------------------------------------------------------------\
| MIN SPAN          | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 |
|-------------------+-------------------------------------------------------------------------------------------------|
| validator 256     | ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     |
| validator 257     | 2     2     2     2     2     7     6     5     4     3     2     3     2     ∞     ∞     ∞     |
| validator 258     | 2     2     2     2     2     2     2     2     2     2     2     2     ∞     ∞     ∞     ∞     |
| ................. | ............................................................................................... |
| validator 510     | ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     |
| validator 511     | ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     ∞     |
\---------------------------------------------------------------------------------------------------------------------/

Chunks are stored in the database as a flat array of bytes.

For this example, the stored value will be:

| validator 256 | validator 257 | validator 258 |...| validator 510 | validator 511 |
[∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,2,2,2,2,2,7,6,5,4,3,2,3,2,∞,∞,∞,2,2,2,2,2,2,2,2,2,2,2,2,∞,∞,∞,∞,...,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞]
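The flattening is plain row-major indexing: each validator in the chunk occupies 16 consecutive slots, one per epoch. A minimal sketch of the index arithmetic, with hypothetical names (the real storage code may organize this differently):

	const (
		validatorsPerChunk = 256
		epochsPerChunk     = 16
	)

	// chunkOffset maps a (validator, epoch) pair to its index within the flat
	// array of the chunk that contains it.
	func chunkOffset(validator, epoch uint64) uint64 {
		v := validator % validatorsPerChunk // position of the validator within the chunk
		e := epoch % epochsPerChunk         // position of the epoch within the chunk
		return v*epochsPerChunk + e
	}

In the example above, validator 257 at epoch 8213 lands at offset 1*16 + 5 = 21, which indeed holds the value 7 in the flat array.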
A chunk contains 256 validators * 16 epochs = 4096 values.
A chunk value is stored as 2 bytes (uint16).
==> A chunk takes 4096 * 2 bytes = 8192 bytes = 8KB

There are 4096 epochs / 16 epochs per chunk = 256 chunks per batch of 256 validators.
Storing all values of a batch of 256 validators takes 256 * 8KB = 2MB.
With 1_048_576 validators, we have 1_048_576 / 256 = 4096 batches, so we need 4096 * 2MB = 8GB.
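The same arithmetic, restated as Go constants for quick verification (illustrative only; these names are not from the codebase):

	const (
		valuesPerChunk = 256 * 16                    // 4096 values per chunk
		chunkBytes     = valuesPerChunk * 2          // uint16 values: 8192 bytes = 8KB
		chunksPerBatch = 4096 / 16                   // 256 chunks cover 4096 epochs
		batchBytes     = chunksPerBatch * chunkBytes // 2MB per batch of 256 validators
		batches        = 1_048_576 / 256             // 4096 batches
		spanBytes      = batches * batchBytes        // 8GB for one span type
	)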
Storing both MIN and MAX spans for 1_048_576 validators takes 2 * 8GB = 16GB.

Each chunk is stored snappy-compressed in the database.

If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and a MAX SPAN chunk will contain only `0`s.
Such chunks compress very well, which lets us store a lot of data in a small amount of space.
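For intuition about how well that ideal case compresses, here is a small self-contained sketch using the github.com/golang/snappy package (assumed here purely for illustration; the database layer wires compression in its own way):

	package main

	import (
		"fmt"

		"github.com/golang/snappy"
	)

	func main() {
		// An idealized MIN SPAN chunk: 4096 uint16 values, all equal to 2,
		// serialized into 8192 bytes (assuming the low byte carries the 2).
		raw := make([]byte, 8192)
		for i := 0; i < len(raw); i += 2 {
			raw[i] = 2
		}
		compressed := snappy.Encode(nil, raw)
		fmt.Printf("raw=%dB compressed=%dB\n", len(raw), len(compressed))
		// Such repetitive input shrinks to a tiny fraction of the original 8KB.
	}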
*/

package slasher
@@ -129,7 +129,7 @@ func (s *Service) processAttestations(
	validAttestationsCount := len(validAttestations)
	validInFutureAttestationsCount := len(validInFutureAttestations)

	// Log useful infrormation
	// Log useful information.
	log.WithFields(logrus.Fields{
		"currentSlot":  currentSlot,
		"currentEpoch": currentEpoch,
@@ -137,7 +137,9 @@ func (s *Service) processAttestations(
		"numDeferredAtts": validInFutureAttestationsCount,
		"numDroppedAtts":  numDropped,
		"attsQueueSize":   queuedAttestationsCount,
	}).Info("Processing queued attestations for slashing detection")
	}).Info("Start processing queued attestations")

	start := time.Now()

	// Check for attestation slashings (double, surround, surrounded votes).
	slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
@@ -154,6 +156,13 @@ func (s *Service) processAttestations(
		return nil
	}

	end := time.Since(start)
	log.WithField("elapsed", end).Info("Done processing queued attestations")

	if len(slashings) > 0 {
		log.WithField("numSlashings", len(slashings)).Warn("Slashable attestation offenses found")
	}

	return processedAttesterSlashings
}
@@ -58,7 +58,7 @@ type Service struct {
	attsSlotTicker    *slots.SlotTicker
	blocksSlotTicker  *slots.SlotTicker
	pruningSlotTicker *slots.SlotTicker
	latestEpochWrittenForValidator map[primitives.ValidatorIndex]primitives.Epoch
	latestEpochUpdatedForValidator map[primitives.ValidatorIndex]primitives.Epoch
	wg sync.WaitGroup
}
@@ -74,7 +74,7 @@ func New(ctx context.Context, srvCfg *ServiceConfig) (*Service, error) {
		blksQueue: newBlocksQueue(),
		ctx:       ctx,
		cancel:    cancel,
		latestEpochWrittenForValidator: make(map[primitives.ValidatorIndex]primitives.Epoch),
		latestEpochUpdatedForValidator: make(map[primitives.ValidatorIndex]primitives.Epoch),
	}, nil
}
@@ -111,7 +111,7 @@ func (s *Service) run() {
		return
	}
	for _, item := range epochsByValidator {
		s.latestEpochWrittenForValidator[item.ValidatorIndex] = item.Epoch
		s.latestEpochUpdatedForValidator[item.ValidatorIndex] = item.Epoch
	}
	log.WithField("elapsed", time.Since(start)).Info(
		"Finished retrieving last epoch written per validator",
@@ -162,7 +162,7 @@ func (s *Service) Stop() error {
	defer innerCancel()
	log.Info("Flushing last epoch written for each validator to disk, please wait")
	if err := s.serviceCfg.Database.SaveLastEpochsWrittenForValidators(
		ctx, s.latestEpochWrittenForValidator,
		ctx, s.latestEpochUpdatedForValidator,
	); err != nil {
		log.Error(err)
	}
@@ -185,13 +185,14 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
		copy(dstFieldTrie[i], layer)
	}
	return &FieldTrie{
		fieldLayers: dstFieldTrie,
		field:       f.field,
		dataType:    f.dataType,
		reference:   stateutil.NewRef(1),
		RWMutex:     new(sync.RWMutex),
		length:      f.length,
		numOfElems:  f.numOfElems,
		fieldLayers:   dstFieldTrie,
		field:         f.field,
		dataType:      f.dataType,
		reference:     stateutil.NewRef(1),
		RWMutex:       new(sync.RWMutex),
		length:        f.length,
		numOfElems:    f.numOfElems,
		isTransferred: f.isTransferred,
	}
}
@@ -58,9 +58,9 @@ func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, er
	}

	if features.Get().EnableExperimentalState {
		return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validatorsVal(), currentEpoch)
		return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValMultiValueSliceReader(b.validatorsMultiValue, b), currentEpoch)
	} else {
		return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validators, currentEpoch)
		return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValSliceReader(b.validators), currentEpoch)
	}
}

@@ -6,6 +6,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/container/slice"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -198,9 +199,14 @@ func (b *BeaconState) addDirtyIndices(index types.FieldIndex, indices []uint64)
		return
	}
	totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
	// Reduce duplicates to verify that these are indeed unique.
	if totalIndicesLen > indicesLimit {
		b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
		totalIndicesLen = len(b.dirtyIndices[index]) + len(indices)
	}
	if totalIndicesLen > indicesLimit {
		b.rebuildTrie[index] = true
		b.dirtyIndices[index] = []uint64{}
		b.dirtyIndices[index] = make([]uint64, 0, indicesLimit)
	} else {
		b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...)
	}
@@ -416,6 +416,26 @@ func TestCopyAllTries(t *testing.T) {
	assert.NotEqual(t, rt, newRt)
}

func TestDuplicateDirtyIndices(t *testing.T) {
	newState := &BeaconState{
		rebuildTrie:  make(map[types.FieldIndex]bool),
		dirtyIndices: make(map[types.FieldIndex][]uint64),
	}
	for i := uint64(0); i < indicesLimit-5; i++ {
		newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], i)
	}
	// Append duplicates
	newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], []uint64{0, 1, 2, 3, 4}...)

	// We would remove the duplicates and stay under the threshold
	newState.addDirtyIndices(types.Balances, []uint64{9997, 9998})
	assert.Equal(t, false, newState.rebuildTrie[types.Balances])

	// We would trigger above the threshold.
	newState.addDirtyIndices(types.Balances, []uint64{10000, 10001, 10002, 10003})
	assert.Equal(t, true, newState.rebuildTrie[types.Balances])
}

func generateState(t *testing.T) state.BeaconState {
	count := uint64(100)
	vals := make([]*ethpb.Validator, 0, count)
@@ -1158,8 +1158,16 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
	}

	if fTrie.FieldReference().Refs() > 1 {
		var newTrie *fieldtrie.FieldTrie
		// We choose to only copy the validator
		// trie as it is pretty expensive to regenerate
		// in the event of late blocks.
		if index == types.Validators {
			newTrie = fTrie.CopyTrie()
		} else {
			newTrie = fTrie.TransferTrie()
		}
		fTrie.FieldReference().MinusRef()
		newTrie := fTrie.TransferTrie()
		b.stateFieldLeaves[index] = newTrie
		fTrie = newTrie
	}
@@ -17,6 +17,7 @@ go_library(
		"trie_helpers.go",
		"unrealized_justification.go",
		"validator_map_handler.go",
		"validator_reader.go",
		"validator_root.go",
	],
	importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil",
@@ -26,6 +27,7 @@ go_library(
		"//config/fieldparams:go_default_library",
		"//config/params:go_default_library",
		"//consensus-types/primitives:go_default_library",
		"//container/multi-value-slice:go_default_library",
		"//container/trie:go_default_library",
		"//crypto/hash:go_default_library",
		"//crypto/hash/htr:go_default_library",
@@ -56,6 +58,7 @@ go_test(
		"//config/fieldparams:go_default_library",
		"//config/params:go_default_library",
		"//consensus-types/primitives:go_default_library",
		"//container/multi-value-slice:go_default_library",
		"//crypto/hash:go_default_library",
		"//encoding/bytesutil:go_default_library",
		"//encoding/ssz:go_default_library",
@@ -5,7 +5,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/math"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// UnrealizedCheckpointBalances returns the total current active balance, the
@@ -13,17 +12,21 @@ import (
// current epoch correctly attested for target balance. It takes the current and
// previous epoch participation bits as parameters so implicitly only works for
// beacon states post-Altair.
func UnrealizedCheckpointBalances(cp, pp []byte, validators []*ethpb.Validator, currentEpoch primitives.Epoch) (uint64, uint64, uint64, error) {
func UnrealizedCheckpointBalances(cp, pp []byte, validators ValReader, currentEpoch primitives.Epoch) (uint64, uint64, uint64, error) {
	targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
	activeBalance := uint64(0)
	currentTarget := uint64(0)
	prevTarget := uint64(0)
	if len(cp) < len(validators) || len(pp) < len(validators) {
	if len(cp) < validators.Len() || len(pp) < validators.Len() {
		return 0, 0, 0, errors.New("participation does not match validator set")
	}

	var err error
	for i, v := range validators {
	valLength := validators.Len()
	for i := 0; i < valLength; i++ {
		v, err := validators.At(i)
		if err != nil {
			return 0, 0, 0, err
		}
		active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
		if active && !v.Slashed {
			activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)
@@ -4,6 +4,7 @@ import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/config/params"
	multi_value_slice "github.com/prysmaticlabs/prysm/v5/container/multi-value-slice"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -25,7 +26,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
	pp := make([]byte, len(validators))

	t.Run("No one voted last two epochs", func(tt *testing.T) {
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 0)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 0)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
@@ -35,7 +36,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
	t.Run("bad votes in last two epochs", func(tt *testing.T) {
		copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
		copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
@@ -45,7 +46,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
	t.Run("two votes in last epoch", func(tt *testing.T) {
		copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
		copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
@@ -55,7 +56,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
	t.Run("two votes in previous epoch", func(tt *testing.T) {
		copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
		copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
@@ -66,7 +67,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
		validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
		copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
		copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MinDepositAmount
		require.Equal(tt, expectedActive, active)
@@ -76,7 +77,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {

	t.Run("slash a validator", func(tt *testing.T) {
		validators[1].Slashed = true
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
		require.Equal(tt, expectedActive, active)
@@ -85,7 +86,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
	})
	t.Run("Exit a validator", func(tt *testing.T) {
		validators[4].ExitEpoch = 1
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 2)
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 2)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
		require.Equal(tt, expectedActive, active)
@@ -93,3 +94,105 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
	})
}
func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
	targetFlag := params.BeaconConfig().TimelyTargetFlagIndex
	expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance

	balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{
			ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
		}
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

	mv := &multi_value_slice.Slice[*ethpb.Validator]{}
	mv.Init(validators)

	cp := make([]byte, len(validators))
	pp := make([]byte, len(validators))

	t.Run("No one voted last two epochs", func(tt *testing.T) {
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 0)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
		require.Equal(tt, uint64(0), previous)
	})

	t.Run("bad votes in last two epochs", func(tt *testing.T) {
		copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
		copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
		require.Equal(tt, uint64(0), previous)
	})

	t.Run("two votes in last epoch", func(tt *testing.T) {
		copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
		copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
		require.Equal(tt, uint64(0), previous)
	})

	t.Run("two votes in previous epoch", func(tt *testing.T) {
		copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
		copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
		require.NoError(tt, err)
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, uint64(0), current)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
	})

	t.Run("votes in both epochs, decreased balance in first validator", func(tt *testing.T) {
		validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
		copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
		copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MinDepositAmount
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
	})

	t.Run("slash a validator", func(tt *testing.T) {
		validators[1].Slashed = true
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
	})
	t.Run("Exit a validator", func(tt *testing.T) {
		validators[4].ExitEpoch = 1
		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 2)
		require.NoError(tt, err)
		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
		require.Equal(tt, expectedActive, active)
		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
	})
}

type testObject struct {
	id uint64
}

func (o *testObject) Id() uint64 {
	return o.id
}

func (o *testObject) SetId(id uint64) {
	o.id = id
}
beacon-chain/state/stateutil/validator_reader.go (new file)
@@ -0,0 +1,59 @@
package stateutil

import (
	multi_value_slice "github.com/prysmaticlabs/prysm/v5/container/multi-value-slice"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// ValReader specifies an interface through which we can access the validator registry.
type ValReader interface {
	Len() int
	At(i int) (*ethpb.Validator, error)
}

// ValSliceReader describes a struct that conforms to the ValReader interface.
type ValSliceReader struct {
	Validators []*ethpb.Validator
}

// NewValSliceReader constructs a ValSliceReader object.
func NewValSliceReader(vals []*ethpb.Validator) ValSliceReader {
	return ValSliceReader{Validators: vals}
}

// Len is the length of the validator registry.
func (v ValSliceReader) Len() int {
	return len(v.Validators)
}

// At returns the validator at the provided index.
func (v ValSliceReader) At(i int) (*ethpb.Validator, error) {
	return v.Validators[i], nil
}

// ValMultiValueSliceReader describes a struct that conforms to the ValReader interface.
// This struct is specifically designed for accessing validator data from a
// multivalue slice.
type ValMultiValueSliceReader struct {
	ValMVSlice *multi_value_slice.Slice[*ethpb.Validator]
	Identifier multi_value_slice.Identifiable
}

// NewValMultiValueSliceReader constructs a new val reader object.
func NewValMultiValueSliceReader(valSlice *multi_value_slice.Slice[*ethpb.Validator],
	identifier multi_value_slice.Identifiable) ValMultiValueSliceReader {
	return ValMultiValueSliceReader{
		ValMVSlice: valSlice,
		Identifier: identifier,
	}
}

// Len is the length of the validator registry.
func (v ValMultiValueSliceReader) Len() int {
	return v.ValMVSlice.Len(v.Identifier)
}

// At returns the validator at the provided index.
func (v ValMultiValueSliceReader) At(i int) (*ethpb.Validator, error) {
	return v.ValMVSlice.At(v.Identifier, uint64(i))
}
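Both readers satisfy ValReader, so callers such as UnrealizedCheckpointBalances can iterate over the registry without knowing which backing store is in use. A brief usage sketch (hypothetical values, not from the test suite):

	// Plain slice backing.
	vals := []*ethpb.Validator{{EffectiveBalance: 32_000_000_000}}
	r := stateutil.NewValSliceReader(vals)
	for i := 0; i < r.Len(); i++ {
		v, err := r.At(i)
		if err != nil {
			// ValSliceReader never returns an error, but the interface keeps
			// the error return so the multi-value-slice reader can report
			// lookup failures through the same code path.
			break
		}
		_ = v.EffectiveBalance
	}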
@@ -6,6 +6,7 @@ go_library(
		"batch.go",
		"batcher.go",
		"blobs.go",
		"log.go",
		"metrics.go",
		"pool.go",
		"service.go",

@@ -12,7 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus"
)

// ErrChainBroken indicates a backfill batch can't be imported to the db because it is not known to be the ancestor
@@ -73,9 +73,9 @@ type batch struct {
	bs *blobSync
}

func (b batch) logFields() log.Fields {
func (b batch) logFields() logrus.Fields {
	return map[string]interface{}{
		"batch_id": b.id(),
		"batchId":   b.id(),
		"state":     b.state.String(),
		"scheduled": b.scheduled.String(),
		"seq":       b.seq,
@@ -139,7 +139,7 @@ func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {

func (b batch) postBlobSync() batch {
	if b.blobsNeeded() > 0 {
		log.WithFields(b.logFields()).WithField("blobs_missing", b.blobsNeeded()).Error("batch still missing blobs after downloading from peer")
		log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
		b.bs = nil
		b.results = []blocks.ROBlock{}
		return b.withState(batchErrRetryable)
@@ -153,14 +153,14 @@ func (b batch) withState(s batchState) batch {
		switch b.state {
		case batchErrRetryable:
			b.retries += 1
			log.WithFields(b.logFields()).Info("sequencing batch for retry")
			log.WithFields(b.logFields()).Info("Sequencing batch for retry")
		case batchInit, batchNil:
			b.firstScheduled = b.scheduled
		}
	}
	if s == batchImportComplete {
		backfillBatchTimeRoundtrip.Observe(float64(time.Since(b.firstScheduled).Milliseconds()))
		log.WithFields(b.logFields()).Debug("Backfill batch imported.")
		log.WithFields(b.logFields()).Debug("Backfill batch imported")
	}
	b.state = s
	b.seq += 1

@@ -112,7 +112,7 @@ func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVer
	return m[rb.Index]
}

func (bbv blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
func (bbv *blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
	m, ok := bbv.verifiers[blk.Root()]
	if !ok {
		return nil, errors.Wrapf(verification.ErrMissingVerification, "no record of verifiers for root %#x", blk.Root())
beacon-chain/sync/backfill/log.go (new file)
@@ -0,0 +1,5 @@
package backfill

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "backfill")
@@ -14,7 +14,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	log "github.com/sirupsen/logrus"
)

type batchWorkerPool interface {
@@ -17,7 +17,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/proto/dbval"
	"github.com/prysmaticlabs/prysm/v5/runtime"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)

type Service struct {
@@ -149,12 +148,12 @@ func (s *Service) initVerifier(ctx context.Context) (*verifier, sync.ContextByte
	}
	keys, err := cps.PublicKeys()
	if err != nil {
		return nil, nil, errors.Wrap(err, "Unable to retrieve public keys for all validators in the origin state")
		return nil, nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
	}
	vr := cps.GenesisValidatorsRoot()
	ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
	if err != nil {
		return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
		return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
	}
	v, err := newBackfillVerifier(vr, keys)
	return v, ctxMap, err
@@ -164,10 +163,10 @@ func (s *Service) updateComplete() bool {
	b, err := s.pool.complete()
	if err != nil {
		if errors.Is(err, errEndSequence) {
			log.WithField("backfill_slot", b.begin).Info("Backfill is complete.")
			log.WithField("backfillSlot", b.begin).Info("Backfill is complete")
			return true
		}
		log.WithError(err).Error("Backfill service received unhandled error from worker pool.")
		log.WithError(err).Error("Backfill service received unhandled error from worker pool")
		return true
	}
	s.batchSeq.update(b)
@@ -187,11 +186,11 @@ func (s *Service) importBatches(ctx context.Context) {
	for i := range importable {
		ib := importable[i]
		if len(ib.results) == 0 {
			log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer.")
			log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer")
		}
		_, err := s.batchImporter(ctx, current, ib, s.store)
		if err != nil {
			log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import.")
			log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import")
			s.downscore(ib)
			s.batchSeq.update(ib.withState(batchErrRetryable))
			// If a batch fails, the subsequent batches are no longer considered importable.
@@ -204,8 +203,8 @@ func (s *Service) importBatches(ctx context.Context) {

	nt := s.batchSeq.numTodo()
	log.WithField("imported", imported).WithField("importable", len(importable)).
		WithField("batches_remaining", nt).
		Info("Backfill batches processed.")
		WithField("batchesRemaining", nt).
		Info("Backfill batches processed")

	backfillRemainingBatches.Set(float64(nt))
}
@@ -220,7 +219,7 @@ func (s *Service) scheduleTodos() {
	// and then we'll have the parent_root expected by 90 to ensure it matches the root for 89,
	// at which point we know we can process [80..90).
	if errors.Is(err, errMaxBatches) {
		log.Debug("Backfill batches waiting for descendent batch to complete.")
		log.Debug("Backfill batches waiting for descendent batch to complete")
		return
	}
}
@@ -232,17 +231,17 @@ func (s *Service) scheduleTodos() {
// Start begins the runloop of backfill.Service in the current goroutine.
func (s *Service) Start() {
	if !s.enabled {
		log.Info("Backfill service not enabled.")
		log.Info("Backfill service not enabled")
		return
	}
	ctx, cancel := context.WithCancel(s.ctx)
	defer func() {
		log.Info("Backfill service is shutting down.")
		log.Info("Backfill service is shutting down")
		cancel()
	}()
	clock, err := s.cw.WaitForClock(ctx)
	if err != nil {
		log.WithError(err).Error("Backfill service failed to start while waiting for genesis data.")
		log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
		return
	}
	s.clock = clock
@@ -250,39 +249,39 @@ func (s *Service) Start() {
	s.newBlobVerifier = newBlobVerifierFromInitializer(v)

	if err != nil {
		log.WithError(err).Error("Could not initialize blob verifier in backfill service.")
		log.WithError(err).Error("Could not initialize blob verifier in backfill service")
		return
	}

	if s.store.isGenesisSync() {
		log.Info("Backfill short-circuit; node synced from genesis.")
		log.Info("Backfill short-circuit; node synced from genesis")
		return
	}
	status := s.store.status()
	// Exit early if there aren't going to be any batches to backfill.
	if primitives.Slot(status.LowSlot) <= s.ms(s.clock.CurrentSlot()) {
		log.WithField("minimum_required_slot", s.ms(s.clock.CurrentSlot())).
			WithField("backfill_lowest_slot", status.LowSlot).
			Info("Exiting backfill service; minimum block retention slot > lowest backfilled block.")
		log.WithField("minimumRequiredSlot", s.ms(s.clock.CurrentSlot())).
			WithField("backfillLowestSlot", status.LowSlot).
			Info("Exiting backfill service; minimum block retention slot > lowest backfilled block")
		return
	}
	s.verifier, s.ctxMap, err = s.initVerifier(ctx)
	if err != nil {
		log.WithError(err).Error("Unable to initialize backfill verifier.")
		log.WithError(err).Error("Unable to initialize backfill verifier")
		return
	}

	if s.initSyncWaiter != nil {
		log.Info("Backfill service waiting for initial-sync to reach head before starting.")
		log.Info("Backfill service waiting for initial-sync to reach head before starting")
		if err := s.initSyncWaiter(); err != nil {
			log.WithError(err).Error("Error waiting for init-sync to complete.")
			log.WithError(err).Error("Error waiting for init-sync to complete")
			return
		}
	}
	s.pool.spawn(ctx, s.nWorkers, clock, s.pa, s.verifier, s.ctxMap, s.newBlobVerifier, s.blobStore)
	s.batchSeq = newBatchSequencer(s.nWorkers, s.ms(s.clock.CurrentSlot()), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
	if err = s.initBatches(); err != nil {
		log.WithError(err).Error("Non-recoverable error in backfill service.")
		log.WithError(err).Error("Non-recoverable error in backfill service")
		return
	}
@@ -296,7 +295,7 @@ func (s *Service) Start() {
	s.importBatches(ctx)
	batchesWaiting.Set(float64(s.batchSeq.countWithState(batchImportable)))
	if err := s.batchSeq.moveMinimum(s.ms(s.clock.CurrentSlot())); err != nil {
		log.WithError(err).Error("Non-recoverable error while adjusting backfill minimum slot.")
		log.WithError(err).Error("Non-recoverable error while adjusting backfill minimum slot")
	}
	s.scheduleTodos()
}
@@ -15,7 +15,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/proto/dbval"
)

var errBatchDisconnected = errors.New("Highest block root in backfill batch doesn't match next parent_root")
var errBatchDisconnected = errors.New("highest block root in backfill batch doesn't match next parent_root")

// NewUpdater correctly initializes a StatusUpdater value with the required database value.
func NewUpdater(ctx context.Context, store BeaconDB) (*Store, error) {
@@ -56,7 +56,7 @@ func testBlocksWithKeys(t *testing.T, nBlocks uint64, nBlobs int, vr []byte) ([]

func TestVerify(t *testing.T) {
	vr := make([]byte, 32)
	copy(vr, []byte("yooooo"))
	copy(vr, "yooooo")
	blks, _, _, pks := testBlocksWithKeys(t, 2, 0, vr)
	pubkeys := make([][fieldparams.BLSPubkeyLength]byte, len(pks))
	for i := range pks {

@@ -10,7 +10,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	log "github.com/sirupsen/logrus"
)

type workerId int
@@ -31,14 +30,14 @@ func (w *p2pWorker) run(ctx context.Context) {
	for {
		select {
		case b := <-w.todo:
			log.WithFields(b.logFields()).WithField("backfill_worker", w.id).Debug("Backfill worker received batch.")
			log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
			if b.state == batchBlobSync {
				w.done <- w.handleBlobs(ctx, b)
			} else {
				w.done <- w.handleBlocks(ctx, b)
			}
		case <-ctx.Done():
			log.WithField("backfill_worker", w.id).Info("Backfill worker exiting after context canceled.")
			log.WithField("backfillWorker", w.id).Info("Backfill worker exiting after context canceled")
			return
		}
	}
@@ -73,7 +72,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
		bdl += vb[i].SizeSSZ()
	}
	backfillBlocksApproximateBytes.Add(float64(bdl))
	log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("backfill batch block bytes downloaded")
	log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("Backfill batch block bytes downloaded")
	bs, err := newBlobSync(cs, vb, &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.nbv, store: w.bfs})
	if err != nil {
		return b.withRetryableError(err)
@@ -97,7 +96,7 @@ func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
		// All blobs are the same size, so we can compute 1 and use it for all in the batch.
		sz := blobs[0].SizeSSZ() * len(blobs)
		backfillBlobsApproximateBytes.Add(float64(sz))
		log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("backfill batch blob bytes downloaded")
		log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("Backfill batch blob bytes downloaded")
	}
	return b.postBlobSync()
}

@@ -151,14 +151,14 @@ func newBlockBatch(start, reqEnd primitives.Slot, size uint64) (blockBatch, bool
	return nb, true
}

func (bat blockBatch) next(reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
	if bat.error() != nil {
		return bat, false
func (bb blockBatch) next(reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
	if bb.error() != nil {
		return bb, false
	}
	if bat.nonLinear() {
	if bb.nonLinear() {
		return blockBatch{}, false
	}
	return newBlockBatch(bat.end.Add(1), reqEnd, size)
	return newBlockBatch(bb.end.Add(1), reqEnd, size)
}

// blocks returns the list of linear, canonical blocks read from the db.
Some files were not shown because too many files have changed in this diff.