Compare commits

...

26 Commits

Author SHA1 Message Date
Kasey Kirkham
c306f70d5d metric to track batch retries 2024-02-28 17:23:17 -06:00
james-prysm
6d3c6a6331 move setting route handlers to registration from start (#13676)
* moving route registration out of the start function and into registration

* moving where grpc is set
2024-02-28 11:30:31 +00:00
Nishant Das
f1615c4c88 Employ Dynamic Cache Sizes (#13640)
* dynamic cache sizes

* tests

* gosimple

* fix it

* add tests

* comments

* skip test

* Update beacon-chain/blockchain/receive_block_test.go

Co-authored-by: terence <terence@prysmaticlabs.com>

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2024-02-28 10:46:52 +00:00
Preston Van Loon
87b127365f Update bazel-lib to include https://github.com/aspect-build/bazel-lib/pull/768 (#13675) 2024-02-27 21:24:44 +00:00
Manu NALEPA
5215ed03fd Set the log level for running on <network> as INFO. (#13670)
In the case of a custom Ethereum network, keep the log level at `WARN` (see the sketch below).
2024-02-27 09:48:22 +00:00
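As a rough, hypothetical sketch of the behavior described in the commit above (not Prysm's actual code): the "running on <network>" startup message is logged at INFO for known networks and stays at WARN for custom ones. The network set and function name below are illustrative assumptions.

```go
package main

import log "github.com/sirupsen/logrus"

// knownNetworks is an illustrative set of built-in network names.
var knownNetworks = map[string]bool{"mainnet": true, "sepolia": true, "holesky": true}

// logRunningOn emits the startup banner at INFO for known networks and
// keeps WARN for custom Ethereum networks, mirroring the commit's description.
func logRunningOn(name string) {
	entry := log.WithField("network", name)
	if knownNetworks[name] {
		entry.Info("Running on network")
		return
	}
	entry.Warn("Running on custom Ethereum network")
}

func main() {
	logRunningOn("mainnet")  // logged at INFO
	logRunningOn("devnet-7") // logged at WARN
}
```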
james-prysm
0453d18395 small cleanup on functions (#13666) 2024-02-26 22:23:23 +00:00
kasey
0132c1b17d download checkpoint sync origin blobs in init-sync (#13665)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-02-26 22:00:15 +00:00
Radosław Kapka
d9d2ee75de Do not log zero sync committee messages (#13662) 2024-02-26 17:57:26 +00:00
Nishant Das
ddb321e0ce add changes (#13661) 2024-02-26 13:45:17 +00:00
Nishant Das
5735379963 Use a Validator Reader When Computing Unrealized Balances (#13656)
* employ a val reader to prevent constant copies

* clean it up and fix tests

* gaz

* radek's review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-02-24 12:10:00 +00:00
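A hedged sketch of the idea behind the "Use a Validator Reader" commit above: iterate validators through a read-only accessor instead of copying each validator record before use, avoiding one copy per validator when computing unrealized balances. The interface and function below are illustrative assumptions, not Prysm's exact API.

```go
package example

// readOnlyValidator exposes only the fields needed for balance accounting.
type readOnlyValidator interface {
	EffectiveBalance() uint64
	Slashed() bool
}

// validatorReader hands out validators without materializing copies.
type validatorReader interface {
	ReadFromEveryValidator(f func(idx int, val readOnlyValidator) error) error
}

// totalActiveBalance sums effective balances via the reader, so no validator
// structs are copied while walking the set.
func totalActiveBalance(r validatorReader) (uint64, error) {
	var total uint64
	err := r.ReadFromEveryValidator(func(_ int, v readOnlyValidator) error {
		if !v.Slashed() {
			total += v.EffectiveBalance()
		}
		return nil
	})
	return total, err
}
```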
Nishant Das
1d5a09c05d Optimize Adding Dirty Indices (#13660)
* add it in

* add in test

* potuz's review
2024-02-24 10:08:17 +00:00
kasey
70e1b11aeb blob save fsync feature flag (#13652)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-02-23 23:14:35 +00:00
Dhruv Bodani
e100fb0c08 Add support for sync committee selections (#13633)
* add support for sync committee selections

* go mod tidy

* remove unused fields

* fix build

* fix build

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-02-23 13:53:42 +00:00
Nishant Das
789c3f8078 add changes (#13657) 2024-02-23 10:51:46 +00:00
Radosław Kapka
0b261cba5e Unify log fields (#13654)
* unify fields

* fix tests
2024-02-22 22:40:36 +00:00
Radosław Kapka
7a9608ea20 Normalize backfill logs/errors (#13642)
* Normalize backfill logs

* improve flag desc

* review
2024-02-22 12:32:32 +00:00
Nishant Das
f795e09ecf do not store it (#13637) 2024-02-22 02:08:40 +00:00
Nishant Das
e6a6365bdd Use Max Request Limit in Initial Sync (#13641)
* use max limit

* manu's review
2024-02-22 01:12:43 +00:00
kasey
4c66e4d060 avoid part path collisions with mem addr entropy (#13648)
* avoid part path collisions with mem addr entropy

* Regression test

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-02-21 22:47:33 +00:00
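The corresponding filesystem diff appears further down. As an illustration of the collision-avoidance idea named in this commit, under assumed names (not the actual Prysm helpers), each concurrent save attempt gets its own ".part" file name by mixing in per-call entropy, so two goroutines saving the same blob never truncate each other's partial file before the atomic rename:

```go
package main

import (
	"fmt"
	"path"
)

// partPath builds a per-attempt name for the temporary ".part" file by mixing
// in caller-supplied entropy alongside the blob index.
func partPath(dir string, index uint64, entropy string) string {
	return path.Join(dir, fmt.Sprintf("%s-%d.part", entropy, index))
}

func main() {
	data := make([]byte, 8)
	// The diff below uses the memory address of the encoded sidecar bytes,
	// fmt.Sprintf("%p", sidecarData), as the entropy source.
	fmt.Println(partPath("blobs/0xabcd", 3, fmt.Sprintf("%p", data)))
}
```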
Preston Van Loon
daad29d0de blob save: add better data checking for empty blob issues (#13647) 2024-02-21 21:57:43 +00:00
terence
9f67ad9496 Fix blob batch verifier pointer receiver (#13649) 2024-02-21 20:34:55 +00:00
terence
0ee0653a15 Remove unused bolt buckets (#13638) 2024-02-21 19:08:13 +00:00
Sammy Rosso
4ff91bebf8 Switch gomock library (#13639)
* Update gomock

* Update mockgen

* Gaz

* Go mod

* Cleanup

* Regenerate gomock

* Manually fix import
2024-02-21 18:37:17 +00:00
Radosław Kapka
f85e027141 Normalize filesystem/blob logs (#13644) 2024-02-21 17:34:57 +00:00
Manu NALEPA
e09ae75c9f Normalize checkpoint logs. (#13643) 2024-02-21 15:20:44 +00:00
Manu NALEPA
cb80d5ad32 Slasher: Reduce surrounding/surrounded attestations processing time (#13629)
* Improve package documentation.

* `processAttestations`: Improve logging.

* Add `Benchmark_checkSurroundVotes` benchmark.

* Implement `saveChunksToDisk` as a replacement for `saveUpdatedChunks`.

The idea is to open only one DB transaction for all validator chunk indexes instead of
one DB transaction per validator chunk index (a rough sketch follows this commit list).

This saves the overhead of starting and stopping a DB transaction for each chunk index.

Result of `Benchmark_checkSurroundVotes`:
- Before this commit: 133 seconds
- After this commit: 5.05 seconds

* `LoadSlasherChunks` and `SaveSlasherChunks`: Batch.

* `loadChunks` ==> `loadChunksFromDisk`

* `updatedChunkByChunkIndex`: Don't update if `latestEpochWritten == currentEpoch`.

* `updatedChunkByChunkIndex`: Load all needed chunks once.

* `latestEpochWritten` ==> `latestEpochUpdated`.

* `checkSurroundVotes`: Dump to disk at most every `25_600` chunks.

* `SaveAttestationRecordsForValidators`: Batch.

* `batchSize`: Use as package const and add comment.
2024-02-21 15:12:37 +00:00
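As a rough sketch of the transaction batching described in the commit message above, assuming a bbolt store with a pre-created bucket (names below are illustrative, not Prysm's exact helpers): encode everything up front, then write at most `batchSize` entries per transaction instead of opening one transaction per validator chunk index.

```go
package example

import bolt "go.etcd.io/bbolt"

// batchSize caps how many puts share one transaction; 10_000 is the value
// used in the diff below.
const batchSize = 10_000

// saveChunksBatched writes all key/value pairs using at most batchSize puts
// per bolt transaction. It assumes the bucket already exists.
func saveChunksBatched(db *bolt.DB, bucket []byte, keys, values [][]byte) error {
	for start := 0; start < len(keys); start += batchSize {
		stop := min(start+batchSize, len(keys))
		// One transaction covers the whole batch, amortizing commit overhead.
		if err := db.Update(func(tx *bolt.Tx) error {
			bkt := tx.Bucket(bucket)
			for i := start; i < stop; i++ {
				if err := bkt.Put(keys[i], values[i]); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}
```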
217 changed files with 2798 additions and 1279 deletions

View File

@@ -113,6 +113,13 @@ http_archive(
url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz", # 2024-01-24
)
http_archive(
name = "aspect_bazel_lib",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
)
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
aspect_bazel_lib_dependencies()

View File

@@ -108,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
WithField("blockSlot", b.Block().Slot()).
WithField("stateSlot", s.Slot()).
WithField("stateRoot", hexutil.Encode(sr[:])).
WithField("blockRoot", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,

View File

@@ -310,8 +310,8 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
"validatorIndex": w.ValidatorIndex,
"withdrawalAddress": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)

View File

@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
"body-base64": "(nil value)",
"url": r.URL.String(),
"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
"body-base64": string(body),
"url": r.URL.String(),
"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
return nil

View File

@@ -63,7 +63,7 @@ func TestSaveHead_Different(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -238,7 +238,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

View File

@@ -1531,6 +1531,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
// 12 and recover. Notice that it takes two epochs to fully recover, and we stay
// optimistic for the whole time.
func TestStore_NoViableHead_Liveness(t *testing.T) {
t.Skip("Requires #13664 to be fixed")
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6

View File

@@ -32,6 +32,9 @@ import (
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// This defines how many epochs since finality the run time will begin to expand our respective cache sizes.
var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
@@ -188,6 +191,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
// We apply the same heuristic to some of our more important caches.
if err := s.handleCaches(); err != nil {
return err
}
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
@@ -361,6 +369,27 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
func (s *Service) handleCaches() error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality primitives.Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
if currentEpoch > finalized.Epoch {
sinceFinality = currentEpoch - finalized.Epoch
}
if sinceFinality >= epochsSinceFinalityExpandCache {
helpers.ExpandCommitteeCache()
return nil
}
helpers.CompressCommitteeCache()
return nil
}
// This performs the state transition function and returns the poststate or an
// error if the block fails to verify the consensus rules
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {

View File

@@ -308,6 +308,29 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
func TestHandleCaches_EnablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Expanding committee cache size")
}
func TestHandleCaches_DisablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.handleCaches())
s.genesisTime = time.Now()
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Reducing committee cache size")
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
service, tr := minimalTestService(t)
pool := tr.blsPool

View File

@@ -199,6 +199,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
@@ -418,7 +419,7 @@ func (s *Service) startFromExecutionChain() error {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
@@ -550,6 +551,10 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.HasBlock(ctx, root)
}
func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {

View File

@@ -17,12 +17,16 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
log "github.com/sirupsen/logrus"
)
const (
// maxCommitteesCacheSize defines the max number of shuffled committees that can be cached, on a per-randao basis.
// Due to reorgs and long finality, it's good to keep the old cache around to quickly switch over.
maxCommitteesCacheSize = int(32)
maxCommitteesCacheSize = int(4)
// expandedCommitteeCacheSize defines the expanded size of the committee cache in the event we
// do not have finality to deal with long forks better.
expandedCommitteeCacheSize = int(32)
)
var (
@@ -43,6 +47,7 @@ type CommitteeCache struct {
CommitteeCache *lru.Cache
lock sync.RWMutex
inProgress map[string]bool
size int
}
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
@@ -67,6 +72,33 @@ func (c *CommitteeCache) Clear() {
defer c.lock.Unlock()
c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
c.inProgress = make(map[string]bool)
c.size = maxCommitteesCacheSize
}
// ExpandCommitteeCache expands the size of the committee cache.
func (c *CommitteeCache) ExpandCommitteeCache() {
c.lock.Lock()
defer c.lock.Unlock()
if c.size == expandedCommitteeCacheSize {
return
}
c.CommitteeCache.Resize(expandedCommitteeCacheSize)
c.size = expandedCommitteeCacheSize
log.Warnf("Expanding committee cache size from %d to %d", maxCommitteesCacheSize, expandedCommitteeCacheSize)
}
// CompressCommitteeCache compresses the size of the committee cache.
func (c *CommitteeCache) CompressCommitteeCache() {
c.lock.Lock()
defer c.lock.Unlock()
if c.size == maxCommitteesCacheSize {
return
}
c.CommitteeCache.Resize(maxCommitteesCacheSize)
c.size = maxCommitteesCacheSize
log.Warnf("Reducing committee cache size from %d to %d", expandedCommitteeCacheSize, maxCommitteesCacheSize)
}
// Committee fetches the shuffled indices by slot and committee index. Every list of indices

View File

@@ -74,3 +74,11 @@ func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) Clear() {
return
}
func (c *FakeCommitteeCache) ExpandCommitteeCache() {
return
}
func (c *FakeCommitteeCache) CompressCommitteeCache() {
return
}

View File

@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
}
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -99,7 +99,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock) error {
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
if err != nil {
@@ -115,7 +115,9 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
}
proposerPubKey := proposer.PublicKey
sig := blk.Signature()
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, blk.Block().HashTreeRoot)
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
return blkRoot, nil
})
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.

View File

@@ -79,11 +79,13 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
}
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
assert.NoError(t, err)
blkRoot, err := altairBlk.Block.HashTreeRoot()
assert.NoError(t, err)
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)
sig := keys[0].Sign(rt[:]).Marshal()
altairBlk.Signature = sig
wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
}

View File

@@ -391,6 +391,16 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
return nil
}
// ExpandCommitteeCache resizes the cache to a higher limit.
func ExpandCommitteeCache() {
committeeCache.ExpandCommitteeCache()
}
// CompressCommitteeCache resizes the cache to a lower limit.
func CompressCommitteeCache() {
committeeCache.CompressCommitteeCache()
}
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache.Clear()

View File

@@ -96,6 +96,7 @@ go_test(
"//testing/benchmark:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -48,7 +49,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
@@ -135,7 +136,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)

View File

@@ -23,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
@@ -50,7 +51,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
@@ -124,7 +125,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
BlockHash: make([]byte, 32),
}
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(params.BeaconConfig().SlotsPerEpoch)))
e := beaconState.Eth1Data()
e.DepositCount = 100
require.NoError(t, beaconState.SetEth1Data(e))
@@ -137,7 +138,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
epoch := time.CurrentEpoch(beaconState)
randaoReveal, err := util.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"blob.go",
"ephemeral.go",
"log.go",
"metrics.go",
"pruner.go",
],

View File

@@ -16,12 +16,15 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/file"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
var (
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
errNoBasePath = errors.New("BlobStorage base path not specified in init")
)
const (
@@ -34,14 +37,26 @@ const (
// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error
// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
return func(b *BlobStorage) error {
b.base = base
return nil
}
}
// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
return func(b *BlobStorage) error {
pruner, err := newBlobPruner(b.fs, e)
if err != nil {
return err
}
b.pruner = pruner
b.retentionEpochs = e
return nil
}
}
// WithSaveFsync is an option that causes Save to call fsync before renaming part files for improved durability.
func WithSaveFsync(fsync bool) BlobStorageOption {
return func(b *BlobStorage) error {
b.fsync = fsync
return nil
}
}
@@ -49,30 +64,36 @@ func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error) {
base = path.Clean(base)
if err := file.MkdirAll(base); err != nil {
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
}
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
b := &BlobStorage{
fs: fs,
}
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
b := &BlobStorage{}
for _, o := range opts {
if err := o(b); err != nil {
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
return nil, errors.Wrap(err, "failed to create blob storage")
}
}
if b.pruner == nil {
log.Warn("Initializing blob filesystem storage with pruning disabled")
if b.base == "" {
return nil, errNoBasePath
}
b.base = path.Clean(b.base)
if err := file.MkdirAll(b.base); err != nil {
return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
}
b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
if err != nil {
return nil, err
}
b.pruner = pruner
return b, nil
}
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
fs afero.Fs
pruner *blobPruner
base string
retentionEpochs primitives.Epoch
fsync bool
fs afero.Fs
pruner *blobPruner
}
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -83,7 +104,7 @@ func (bs *BlobStorage) WarmCache() {
}
go func() {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache.")
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
}()
}
@@ -98,7 +119,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return err
}
if exists {
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
return nil
}
if bs.pruner != nil {
@@ -111,11 +132,14 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
sidecarData, err := sidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
} else if len(sidecarData) == 0 {
return errSidecarEmptySSZData
}
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath()
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
partialMoved := false
// Ensure the partial file is deleted.
@@ -126,9 +150,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
// It's expected to error if the save is successful.
err = bs.fs.Remove(partPath)
if err == nil {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"partPath": partPath,
}).Debugf("removed partial file")
}).Debugf("Removed partial file")
}
}()
@@ -138,7 +162,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return errors.Wrap(err, "failed to create partial file")
}
_, err = partialFile.Write(sidecarData)
n, err := partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
@@ -146,11 +170,24 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
}
return errors.Wrap(err, "failed to write to partial file")
}
err = partialFile.Close()
if err != nil {
if bs.fsync {
if err := partialFile.Sync(); err != nil {
return err
}
}
if err := partialFile.Close(); err != nil {
return err
}
if n != len(sidecarData) {
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
}
if n == 0 {
return errEmptyBlobWritten
}
// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
@@ -257,16 +294,12 @@ func (p blobNamer) dir() string {
return rootString(p.root)
}
func (p blobNamer) fname(ext string) string {
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}
func (p blobNamer) partPath() string {
return p.fname(partExt)
func (p blobNamer) partPath(entropy string) string {
return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
}
func (p blobNamer) path() string {
return p.fname(sszExt)
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
}
func rootString(root [32]byte) string {

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"os"
"path"
"sync"
"testing"
"time"
@@ -101,6 +102,30 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
_, err = b.Get(blob.BlockRoot(), blob.Index)
require.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("race conditions", func(t *testing.T) {
// There was a bug where saving the same blob in multiple go routines would cause a partial blob
// to be empty. This test ensures that several routines can safely save the same blob at the
// same time. This isn't ideal behavior from the caller, but should be handled safely anyway.
// See https://github.com/prysmaticlabs/prysm/pull/13648
b, err := NewBlobStorage(WithBasePath(t.TempDir()))
require.NoError(t, err)
blob := testSidecars[0]
var wg sync.WaitGroup
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
require.NoError(t, b.Save(blob))
}()
}
wg.Wait()
res, err := b.Get(blob.BlockRoot(), blob.Index)
require.NoError(t, err)
require.DeepSSZEqual(t, blob, res)
})
}
// pollUntil polls a condition function until it returns true or a timeout is reached.
@@ -243,6 +268,8 @@ func BenchmarkPruning(b *testing.B) {
}
func TestNewBlobStorage(t *testing.T) {
_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
_, err := NewBlobStorage()
require.ErrorIs(t, err, errNoBasePath)
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}

View File

@@ -0,0 +1,5 @@
package filesystem
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "filesystem")

View File

@@ -16,7 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
@@ -87,7 +87,7 @@ func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
}()
} else {
defer func() {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"upToEpoch": slots.ToEpoch(pruneBefore),
"duration": time.Since(start).String(),
"filesRemoved": totalPruned,

View File

@@ -100,37 +100,24 @@ func StoreDatafilePath(dirPath string) string {
}
var Buckets = [][]byte{
attestationsBucket,
blocksBucket,
stateBucket,
proposerSlashingsBucket,
attesterSlashingsBucket,
voluntaryExitsBucket,
chainMetadataBucket,
checkpointBucket,
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,
attestationSourceEpochIndicesBucket,
attestationTargetRootIndicesBucket,
attestationTargetEpochIndicesBucket,
blockSlotIndicesBucket,
stateSlotIndicesBucket,
blockParentRootIndicesBucket,
finalizedBlockRootsIndexBucket,
blockRootValidatorHashesBucket,
// State management service bucket.
newStateServiceCompatibleBucket,
// Migrations
migrationsBucket,
feeRecipientBucket,
registrationBucket,
blobsBucket,
}
// KVStoreOption is a functional option that modifies a kv.Store.

View File

@@ -7,20 +7,15 @@ package kv
// it easy to scan for keys that have a certain shard number as a prefix and return those
// corresponding attestations.
var (
attestationsBucket = []byte("attestations")
blobsBucket = []byte("blobs")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
proposerSlashingsBucket = []byte("proposer-slashings")
attesterSlashingsBucket = []byte("attester-slashings")
voluntaryExitsBucket = []byte("voluntary-exits")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -28,16 +23,11 @@ var (
archivedRootBucket = []byte("archived-index-root")
// Key indices buckets.
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
attestationHeadBlockRootBucket = []byte("attestation-head-block-root-indices")
attestationSourceRootIndicesBucket = []byte("attestation-source-root-indices")
attestationSourceEpochIndicesBucket = []byte("attestation-source-epoch-indices")
attestationTargetRootIndicesBucket = []byte("attestation-target-root-indices")
attestationTargetEpochIndicesBucket = []byte("attestation-target-epoch-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
blockSlotIndicesBucket = []byte("block-slot-indices")
stateSlotIndicesBucket = []byte("state-slot-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
// Specific item keys.
headBlockRootKey = []byte("head-root")
@@ -69,9 +59,6 @@ var (
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
savedStateSlotsKey = []byte("saved-state-slots")
// New state management service compatibility bucket.
newStateServiceCompatibleBucket = []byte("new-state-compatible")
// Migrations
migrationsBucket = []byte("migrations")
)

View File

@@ -48,7 +48,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -23,6 +23,10 @@ import (
const (
attestationRecordKeySize = 32 // Bytes.
rootSize = 32 // Bytes.
// For database performance reasons, database read/write operations
// are chunked into batches of maximum `batchSize` elements.
batchSize = 10_000
)
// LastEpochWrittenForValidators given a list of validator indices returns the latest
@@ -259,14 +263,23 @@ func (s *Store) AttestationRecordForValidator(
// then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attestations []*slashertypes.IndexedAttestationWrapper,
attWrappers []*slashertypes.IndexedAttestationWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))
for i, attestation := range attestations {
attWrappersCount := len(attWrappers)
// If no attestations are provided, skip.
if attWrappersCount == 0 {
return nil
}
// Build encoded target epochs and encoded records
encodedTargetEpoch := make([][]byte, attWrappersCount)
encodedRecords := make([][]byte, attWrappersCount)
for i, attestation := range attWrappers {
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)
value, err := encodeAttestationRecord(attestation)
@@ -278,60 +291,115 @@ func (s *Store) SaveAttestationRecordsForValidators(
encodedRecords[i] = value
}
return s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
// Save attestation records in the database by batch.
for stop := attWrappersCount; stop >= 0; stop -= batchSize {
start := max(0, stop-batchSize)
for i := len(attestations) - 1; i >= 0; i-- {
attestation := attestations[i]
attWrappersBatch := attWrappers[start:stop]
encodedTargetEpochBatch := encodedTargetEpoch[start:stop]
encodedRecordsBatch := encodedRecords[start:stop]
if err := attRecordsBkt.Put(attestation.DataRoot[:], encodedRecords[i]); err != nil {
return err
}
for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
key := append(encodedTargetEpoch[i], encIdx...)
if err := dataRootsBkt.Put(key, attestation.DataRoot[:]); err != nil {
return err
}
}
// Perform basic check.
if len(encodedTargetEpochBatch) != len(encodedRecordsBatch) {
return fmt.Errorf(
"cannot save attestation records, got %d target epochs and %d records",
len(encodedTargetEpochBatch), len(encodedRecordsBatch),
)
}
return nil
})
currentBatchSize := len(encodedTargetEpochBatch)
// Save attestation records in the database.
if err := s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
dataRootsBkt := tx.Bucket(attestationDataRootsBucket)
for i := currentBatchSize - 1; i >= 0; i-- {
attWrapper := attWrappersBatch[i]
dataRoot := attWrapper.DataRoot
encodedTargetEpoch := encodedTargetEpochBatch[i]
encodedRecord := encodedRecordsBatch[i]
if err := attRecordsBkt.Put(dataRoot[:], encodedRecord); err != nil {
return err
}
for _, validatorIndex := range attWrapper.IndexedAttestation.AttestingIndices {
encodedIndex := encodeValidatorIndex(primitives.ValidatorIndex(validatorIndex))
key := append(encodedTargetEpoch, encodedIndex...)
if err := dataRootsBkt.Put(key, dataRoot[:]); err != nil {
return err
}
}
}
return nil
}); err != nil {
return errors.Wrap(err, "failed to save attestation records")
}
}
return nil
}
// LoadSlasherChunks, given a chunk kind and a list of disk keys, retrieves chunks for a validator
// min or max span used by slasher from our database.
func (s *Store) LoadSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte,
) ([][]uint16, []bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
defer span.End()
chunks := make([][]uint16, 0)
var exists []bool
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for _, diskKey := range diskKeys {
key := append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), diskKey...)
chunkBytes := bkt.Get(key)
if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue
keysCount := len(chunkKeys)
chunks := make([][]uint16, 0, keysCount)
exists := make([]bool, 0, keysCount)
encodedKeys := make([][]byte, 0, keysCount)
// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
// Encode keys.
for _, chunkKey := range chunkKeys {
encodedKey := append(encodedKind, chunkKey...)
encodedKeys = append(encodedKeys, encodedKey)
}
// Read chunks from the database by batch.
for start := 0; start < keysCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for _, encodedKey := range encodedKeysBatch {
chunkBytes := bkt.Get(encodedKey)
if chunkBytes == nil {
chunks = append(chunks, []uint16{})
exists = append(exists, false)
continue
}
chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}
chunks = append(chunks, chunk)
exists = append(exists, true)
}
chunk, err := decodeSlasherChunk(chunkBytes)
if err != nil {
return err
}
chunks = append(chunks, chunk)
exists = append(exists, true)
return nil
}); err != nil {
return nil, nil, err
}
return nil
})
return chunks, exists, err
}
return chunks, exists, nil
}
// SaveSlasherChunks given a chunk kind, list of disk keys, and list of chunks,
@@ -341,25 +409,60 @@ func (s *Store) SaveSlasherChunks(
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
defer span.End()
encodedKeys := make([][]byte, len(chunkKeys))
encodedChunks := make([][]byte, len(chunkKeys))
for i := 0; i < len(chunkKeys); i++ {
encodedKeys[i] = append(ssz.MarshalUint8(make([]byte, 0), uint8(kind)), chunkKeys[i]...)
encodedChunk, err := encodeSlasherChunk(chunks[i])
// Ensure we have the same number of keys and chunks.
if len(chunkKeys) != len(chunks) {
return fmt.Errorf(
"cannot save slasher chunks, got %d keys and %d chunks",
len(chunkKeys), len(chunks),
)
}
chunksCount := len(chunks)
// Encode kind.
encodedKind := ssz.MarshalUint8(make([]byte, 0), uint8(kind))
// Encode keys and chunks.
encodedKeys := make([][]byte, chunksCount)
encodedChunks := make([][]byte, chunksCount)
for i := 0; i < chunksCount; i++ {
chunkKey, chunk := chunkKeys[i], chunks[i]
encodedKey := append(encodedKind, chunkKey...)
encodedChunk, err := encodeSlasherChunk(chunk)
if err != nil {
return err
return errors.Wrapf(err, "failed to encode slasher chunk for key %v", chunkKey)
}
encodedKeys[i] = encodedKey
encodedChunks[i] = encodedChunk
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for i := 0; i < len(chunkKeys); i++ {
if err := bkt.Put(encodedKeys[i], encodedChunks[i]); err != nil {
return err
// Save chunks in the database by batch.
for start := 0; start < chunksCount; start += batchSize {
stop := min(start+batchSize, len(encodedKeys))
encodedKeysBatch := encodedKeys[start:stop]
encodedChunksBatch := encodedChunks[start:stop]
batchSize := len(encodedKeysBatch)
if err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)
for i := 0; i < batchSize; i++ {
if err := bkt.Put(encodedKeysBatch[i], encodedChunksBatch[i]); err != nil {
return err
}
}
return nil
}); err != nil {
return errors.Wrap(err, "failed to save slasher chunks")
}
return nil
})
}
return nil
}
// CheckDoubleBlockProposals takes in a list of proposals and for each,

View File

@@ -14,33 +14,56 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := setupDB(t)
valIdx := primitives.ValidatorIndex(1)
target := primitives.Epoch(5)
source := primitives.Epoch(4)
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
const attestationsCount = 11_000
sr := [32]byte{1}
err = beaconDB.SaveAttestationRecordsForValidators(
ctx,
[]*slashertypes.IndexedAttestationWrapper{
createAttestationWrapper(source, target, []uint64{uint64(valIdx)}, sr[:]),
},
)
// Create context.
ctx := context.Background()
// Create database.
beaconDB := setupDB(t)
// Define the validator index.
validatorIndex := primitives.ValidatorIndex(1)
// Defines attestations to save and retrieve.
attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
for i := 0; i < attestationsCount; i++ {
var dataRoot [32]byte
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))
attWrapper := createAttestationWrapper(
primitives.Epoch(i),
primitives.Epoch(i+1),
[]uint64{uint64(validatorIndex)},
dataRoot[:],
)
attWrappers[i] = attWrapper
}
// Check on a sample of validators that no attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)
require.Equal(t, true, attRecord == nil)
}
// Save the attestation records to the database.
err := beaconDB.SaveAttestationRecordsForValidators(ctx, attWrappers)
require.NoError(t, err)
attRecord, err = beaconDB.AttestationRecordForValidator(ctx, valIdx, target)
require.NoError(t, err)
assert.DeepEqual(t, target, attRecord.IndexedAttestation.Data.Target.Epoch)
assert.DeepEqual(t, source, attRecord.IndexedAttestation.Data.Source.Epoch)
assert.DeepEqual(t, sr, attRecord.DataRoot)
// Check on a sample of validators that attestation records are available.
for i := 0; i < attestationsCount; i += 100 {
expected := attWrappers[i]
actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
require.NoError(t, err)
require.DeepEqual(t, expected.IndexedAttestation.Data.Source.Epoch, actual.IndexedAttestation.Data.Source.Epoch)
}
}
func TestStore_LastEpochWrittenForValidators(t *testing.T) {
@@ -138,61 +161,113 @@ func TestStore_CheckAttesterDoubleVotes(t *testing.T) {
}
func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
// Define test parameters.
const (
elemsPerChunk = 16
totalChunks = 11_000
)
// Create context.
ctx := context.Background()
// Create database.
beaconDB := setupDB(t)
elemsPerChunk := 16
totalChunks := 64
chunkKeys := make([][]byte, totalChunks)
chunks := make([][]uint16, totalChunks)
// Create min chunk keys and chunks.
minChunkKeys := make([][]byte, totalChunks)
minChunks := make([][]uint16, totalChunks)
for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i))
minChunkKeys[i] = chunkKey
// Create chunk.
chunk := make([]uint16, elemsPerChunk)
for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(0)
chunk[j] = uint16(i + j)
}
chunks[i] = chunk
chunkKeys[i] = ssz.MarshalUint64(make([]byte, 0), uint64(i))
minChunks[i] = chunk
}
// We save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, chunkKeys, chunks)
// Create max chunk keys and chunks.
maxChunkKeys := make([][]byte, totalChunks)
maxChunks := make([][]uint16, totalChunks)
for i := 0; i < totalChunks; i++ {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i+1))
maxChunkKeys[i] = chunkKey
// Create chunk.
chunk := make([]uint16, elemsPerChunk)
for j := 0; j < len(chunk); j++ {
chunk[j] = uint16(i + j + 1)
}
maxChunks[i] = chunk
}
// Save chunks for min spans.
err := beaconDB.SaveSlasherChunks(ctx, slashertypes.MinSpan, minChunkKeys, minChunks)
require.NoError(t, err)
// We expect no chunks to be stored for max spans.
// Expect no chunks to be stored for max spans.
_, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, chunkKeys,
ctx, slashertypes.MaxSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(chunksExist))
require.Equal(t, len(minChunks), len(chunksExist))
for _, exists := range chunksExist {
require.Equal(t, false, exists)
}
// We check we saved the right chunks.
// Check the right chunks are saved.
retrievedChunks, chunksExist, err := beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, chunkKeys,
ctx, slashertypes.MinSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, chunks[i], retrievedChunks[i])
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
}
// We save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, chunkKeys, chunks)
// Save chunks for max spans.
err = beaconDB.SaveSlasherChunks(ctx, slashertypes.MaxSpan, maxChunkKeys, maxChunks)
require.NoError(t, err)
// We check we saved the right chunks.
// Check right chunks are saved.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MaxSpan, chunkKeys,
ctx, slashertypes.MaxSpan, maxChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(chunks), len(retrievedChunks))
require.Equal(t, len(chunks), len(chunksExist))
require.Equal(t, len(maxChunks), len(retrievedChunks))
require.Equal(t, len(maxChunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, chunks[i], retrievedChunks[i])
require.DeepEqual(t, maxChunks[i], retrievedChunks[i])
}
// Check the right chunks are still saved for min span.
retrievedChunks, chunksExist, err = beaconDB.LoadSlasherChunks(
ctx, slashertypes.MinSpan, minChunkKeys,
)
require.NoError(t, err)
require.Equal(t, len(minChunks), len(retrievedChunks))
require.Equal(t, len(minChunks), len(chunksExist))
for i, exists := range chunksExist {
require.Equal(t, true, exists)
require.DeepEqual(t, minChunks[i], retrievedChunks[i])
}
}

View File

@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
}
log.WithFields(logrus.Fields{
"ChainStartTime": chainStartTime,
"chainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,

View File

@@ -583,6 +583,9 @@ func (s *Service) run(done <-chan struct{}) {
s.runError = nil
s.initPOWService()
// Do not keep storing the finalized state as it is
// no longer of use.
s.removeStartupState()
chainstartTicker := time.NewTicker(logPeriod)
defer chainstartTicker.Stop()
@@ -636,7 +639,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}
fields := logrus.Fields{
"Additional validators needed": valNeeded,
"additionalValidatorsNeeded": valNeeded,
}
if secondsLeft > 0 {
fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second
@@ -910,3 +913,7 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
s.depositTrie = newDepositTrie
return nil
}
func (s *Service) removeStartupState() {
s.cfg.finalizedStateAtStartup = nil
}

View File

@@ -11,7 +11,6 @@ import (
// MuxConfig contains configuration that should be used when registering the beacon node in the gateway.
type MuxConfig struct {
Handler gateway.MuxHandler
EthPbMux *gateway.PbMux
V1AlphaPbMux *gateway.PbMux
}

View File

@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
"validatorIndex": idx,
"slot": data.Slot,
"source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalCorrectTarget++
}
}
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
logFields["correctHead"] = latestPerf.timelyHead
logFields["correctSource"] = latestPerf.timelySource
logFields["correctTarget"] = latestPerf.timelyTarget
logFields["inclusionSlot"] = latestPerf.inclusionSlot
logFields["newBalance"] = balance
logFields["balanceChange"] = balanceChg
s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"aggregatorIndex": att.AggregatorIndex,
"slot": att.Aggregate.Data.Slot,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}

View File

@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
}
func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
wrappedBlock, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)

View File

@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
}
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b
parentRoot := blk.ParentRoot()
log.WithFields(logrus.Fields{
"ProposerIndex": blk.ProposerIndex(),
"Slot": blk.Slot(),
"Version": blk.Version(),
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"NewBalance": balance,
"BalanceChange": balanceChg,
"proposerIndex": blk.ProposerIndex(),
"slot": blk.Slot(),
"version": blk.Version(),
"parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"newBalance": balance,
"balanceChange": balanceChg,
}).Info("Proposed beacon block was included")
}
}
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ProposerIndex": idx,
"Slot": blk.Slot(),
"SlashingSlot": slashing.Header_1.Header.Slot,
"BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
"proposerIndex": idx,
"slot": blk.Slot(),
"slashingSlot": slashing.Header_1.Header.Slot,
"bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"AttesterIndex": idx,
"BlockInclusionSlot": blk.Slot(),
"AttestationSlot1": slashing.Attestation_1.Data.Slot,
"BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"AttestationSlot2": slashing.Attestation_2.Data.Slot,
"BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
"attesterIndex": idx,
"blockInclusionSlot": blk.Slot(),
"attestationSlot1": slashing.Attestation_1.Data.Slot,
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"attestationSlot2": slashing.Attestation_2.Data.Slot,
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncCommitteeContributions,
"validatorIndex": idx,
"startEpoch": p.startEpoch,
"startBalance": p.startBalance,
"totalRequested": p.totalRequestedCount,
"attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"totalProposedBlocks": p.totalProposedCount,
"totalAggregations": p.totalAggregations,
"totalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}

View File

@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
},
{
name: "Proposer slashing an untracked index",
@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
"BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
"beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
},
{
name: "Attester slashing untracked index",
@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
Body: &ethpb.BeaconBlockBody{},
},
wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
},
{
name: "Block proposed by untracked validator",
@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
wrapped, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s.processBlock(ctx, wrapped)
@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
}
s.logAggregatedPerformance()
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
"ValidatorIndex=1 prefix=monitor"
wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
" averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
"correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
"startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
"validatorIndex=1"
require.LogsContain(t, hook, wanted)
}

View File

@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
idx := exit.Exit.ValidatorIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"Slot": blk.Slot(),
"validatorIndex": idx,
"slot": blk.Slot(),
}).Info("Voluntary exit was included")
}
}
@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
defer s.RUnlock()
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"validatorIndex": idx,
}).Info("Voluntary exit was processed")
}
}

View File

@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
wb, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processExitsFromBlock(wb)
require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
}
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
Signature: make([]byte, 96),
}
s.processExit(exit)
require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
}
func TestProcessExitP2PUntrackedIndices(t *testing.T) {

View File

@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf
log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}
@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
log.WithFields(logrus.Fields{
"ValidatorIndex": validatorIdx,
"ExpectedContribCount": len(committeeIndices),
"ContribCount": contrib,
"NewBalance": balance,
"BalanceChange": balanceChg,
"validatorIndex": validatorIdx,
"expectedContribCount": len(committeeIndices),
"contribCount": contrib,
"newBalance": balance,
"balanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}

View File

@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
}
s.processSyncCommitteeContribution(contrib)
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
}
func TestProcessSyncAggregate(t *testing.T) {
@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
require.NoError(t, err)
s.processSyncAggregate(beaconState, wrappedBlock)
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

View File

@@ -111,7 +111,7 @@ func (s *Service) Start() {
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
log.WithFields(logrus.Fields{
"ValidatorIndices": tracked,
"validatorIndices": tracked,
}).Info("Starting service")
go s.run()
@@ -134,7 +134,7 @@ func (s *Service) run() {
}
epoch := slots.ToEpoch(st.Slot())
log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")
s.Lock()
s.initializePerformanceStructures(st, epoch)
@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
log.WithError(err).WithField("validatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
log.WithError(err).WithField("validatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {

View File

@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
s.Lock()
require.Equal(t, s.isLogging, true, "monitor is not running")
s.Unlock()
@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)
}

View File

@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
c := params.BeaconConfig()
c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
params.OverrideBeaconConfig(c)
}

View File

@@ -118,6 +118,7 @@ type BeaconNode struct {
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
@@ -209,6 +210,16 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, err
}
// Allow tests to set it as an opt.
if beacon.BlobStorage == nil {
beacon.BlobStorageOptions = append(beacon.BlobStorageOptions, filesystem.WithSaveFsync(features.Get().BlobSaveFsync))
blobs, err := filesystem.NewBlobStorage(beacon.BlobStorageOptions...)
if err != nil {
return nil, err
}
beacon.BlobStorage = blobs
}
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
@@ -326,6 +337,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.collector = c
// Do not store the finalized state as it has been provided to the respective services during
// their initialization.
beacon.finalizedStateAtStartUp = nil
return beacon, nil
}
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
@@ -422,7 +437,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("database-path", dbPath).Info("Checking DB")
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
@@ -525,7 +540,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("database-path", dbPath).Info("Checking DB")
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
@@ -987,7 +1002,6 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
apigateway.WithGatewayAddr(gatewayAddress),
apigateway.WithRemoteAddr(selfAddress),
apigateway.WithPbHandlers(muxs),
apigateway.WithMuxHandler(gatewayConfig.Handler),
apigateway.WithRemoteCert(selfCert),
apigateway.WithMaxCallRecvMsgSize(maxCallSize),
apigateway.WithAllowedOrigins(allowedOrigins),

View File

@@ -43,6 +43,15 @@ func WithBlobStorage(bs *filesystem.BlobStorage) Option {
}
}
// WithBlobStorageOptions appends 1 or more filesystem.BlobStorageOption on the beacon node,
// to be used when initializing blob storage.
func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return func(bn *BeaconNode) error {
bn.BlobStorageOptions = append(bn.BlobStorageOptions, opt...)
return nil
}
}
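
WithBlobStorageOptions follows the functional-options pattern: callers accumulate filesystem.BlobStorageOption values on the BeaconNode, and New only builds blob storage from them when no pre-built BlobStorage was injected (see the node.go hunk above). A minimal sketch of how a caller might combine it with the fsync option; the import paths are assumed from the repository layout, and `true` stands in for the feature flag:

```go
// Sketch only: not part of this changeset. node.New, node.WithBlobStorageOptions and
// filesystem.WithSaveFsync appear in the diff; the import paths are assumed.
package blobexample

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/node"
	"github.com/urfave/cli/v2"
)

func buildNode(cliCtx *cli.Context, cancel context.CancelFunc) (*node.BeaconNode, error) {
	// The option is only consumed inside New when BlobStorage was not set directly
	// (e.g. by tests using WithBlobStorage).
	return node.New(cliCtx, cancel,
		node.WithBlobStorageOptions(filesystem.WithSaveFsync(true)),
	)
}
```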
// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
return func(bn *BeaconNode) error {

View File

@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))

View File

@@ -118,10 +118,10 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)
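
This dependency swap, together with the import change in the next file, is the mechanical part of the gomock migration: github.com/golang/mock is archived and the maintained fork lives at go.uber.org/mock, which Bazel names @org_uber_go_mock. The controller API is unchanged; a minimal sketch (the package name and mock-free body are placeholders, not from this changeset):

```go
// Sketch only: the same gomock.NewController call works against go.uber.org/mock.
package migration

import (
	"testing"

	"go.uber.org/mock/gomock"
)

func TestControllerAfterMigration(t *testing.T) {
	// NewController wires cleanup through t.Cleanup, so an explicit Finish call
	// is generally unnecessary with the maintained fork.
	ctrl := gomock.NewController(t)
	_ = ctrl // mocks regenerated with the new mockgen would be constructed from ctrl here
}
```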

View File

@@ -13,7 +13,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/mock/gomock"
"go.uber.org/mock/gomock"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"

View File

@@ -80,7 +80,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
httputil.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
root, err := helpers.BlockRootAtSlot(st, slots.PrevSlot(st.Slot()))
if err != nil {
httputil.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
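
This hunk (and the similar ones in the rewards service and proposer code below) replaces the raw `st.Slot()-1` with `slots.PrevSlot`, presumably to avoid unsigned underflow when the state sits at slot 0. A minimal sketch of the saturating behavior the call sites now rely on; this is an assumption about the helper's semantics, not a copy of the time/slots implementation:

```go
// Sketch only: a saturating "previous slot", assumed to mirror slots.PrevSlot.
package main

import "fmt"

type Slot uint64

// prevSlot returns s-1, clamping at 0 so a genesis-slot value cannot wrap around.
func prevSlot(s Slot) Slot {
	if s > 0 {
		return s - 1
	}
	return 0
}

func main() {
	fmt.Println(prevSlot(1)) // 0
	fmt.Println(prevSlot(0)) // 0, instead of wrapping to 18446744073709551615
}
```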

View File

@@ -14,6 +14,7 @@ import (
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/network/httputil"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// BlockRewardsFetcher is an interface that provides access to reward-related responses
@@ -123,7 +124,7 @@ func (rs *BlockRewardService) GetStateForRewards(ctx context.Context, blk interf
// We want to run several block processing functions that update the proposer's balance.
// This will allow us to calculate proposer rewards for each operation (atts, slashings etc).
// To do this, we replay the state up to the block's slot, but before processing the block.
st, err := rs.Replayer.ReplayerForSlot(blk.Slot()-1).ReplayToSlot(ctx, blk.Slot())
st, err := rs.Replayer.ReplayerForSlot(slots.PrevSlot(blk.Slot())).ReplayToSlot(ctx, blk.Slot())
if err != nil {
return nil, &httputil.DefaultJsonError{
Message: "Could not get state: " + err.Error(),

View File

@@ -90,9 +90,9 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -1092,6 +1092,12 @@ func (s *Server) BeaconCommitteeSelections(w http.ResponseWriter, _ *http.Reques
httputil.HandleError(w, "Endpoint not implemented", 501)
}
// SyncCommitteeSelections responds with appropriate message and status code according the spec:
// https://ethereum.github.io/beacon-APIs/#/Validator/submitSyncCommitteeSelections.
func (s *Server) SyncCommitteeSelections(w http.ResponseWriter, _ *http.Request) {
httputil.HandleError(w, "Endpoint not implemented", 501)
}
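
The route is registered further down in this diff, but the handler intentionally answers 501 until sync committee selections are actually supported. A quick way to confirm the wiring against a locally running beacon node; the port (3500) and the empty JSON payload are assumptions, not part of this change:

```go
// Sketch only: POSTing to the new route should currently return 501 Not Implemented.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Post(
		"http://localhost:3500/eth/v1/validator/sync_committee_selections",
		"application/json",
		bytes.NewBufferString(`[]`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // expected: 501 until the endpoint is implemented
}
```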
// attestationDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)
// or the genesis block root in the case of underflow.
func attestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {

View File

@@ -10,7 +10,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
@@ -22,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/v5/testing/mock"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"go.uber.org/mock/gomock"
)
func TestProduceBlockV2(t *testing.T) {

View File

@@ -238,7 +238,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
if err != nil {
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
"blockRoot": hexutil.Encode(root),
}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
}
@@ -254,8 +254,8 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
if err != nil {
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
"blob index": index,
"blockRoot": hexutil.Encode(root),
"blobIndex": index,
}).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
}

View File

@@ -162,7 +162,7 @@ common_deps = [
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"
"github.com/golang/mock/gomock"
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -18,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/mock"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"go.uber.org/mock/gomock"
)
func TestServer_StreamAltairBlocksVerified_ContextCanceled(t *testing.T) {

View File

@@ -12,6 +12,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
synccontribution "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
@@ -20,7 +21,7 @@ func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBea
return
}
syncAggregate, err := vs.getSyncAggregate(ctx, blk.Block().Slot()-1, blk.Block().ParentRoot())
syncAggregate, err := vs.getSyncAggregate(ctx, slots.PrevSlot(blk.Block().Slot()), blk.Block().ParentRoot())
if err != nil {
log.WithError(err).Error("Could not get sync aggregate")
emptySig := [96]byte{0xC0}

View File

@@ -146,8 +146,8 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
if shouldRebuildTrie(canonicalEth1Data.DepositCount, uint64(len(upToEth1DataDeposits))) {
log.WithFields(logrus.Fields{
"unfinalized deposits": len(upToEth1DataDeposits),
"total deposit count": canonicalEth1Data.DepositCount,
"unfinalizedDeposits": len(upToEth1DataDeposits),
"totalDepositCount": canonicalEth1Data.DepositCount,
}).Warn("Too many unfinalized deposits, building a deposit trie from scratch.")
return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
}

View File

@@ -838,7 +838,7 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot())))
req.Block.Body.RandaoReveal = randaoReveal
currentEpoch := coretime.CurrentEpoch(beaconState)
req.Signature, err = signing.ComputeDomainAndSign(beaconState, currentEpoch, req.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])

View File

@@ -193,7 +193,7 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
if err != nil {
return status.Error(codes.Canceled, "Context canceled")
}
log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event")
log.WithField("startTime", clock.GenesisTime()).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
gvr := clock.GenesisValidatorsRoot()
res := &ethpb.ChainStartResponse{

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"
"github.com/golang/mock/gomock"
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
@@ -19,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/mock"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"go.uber.org/mock/gomock"
)
func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {

View File

@@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/prysm/v5/async/event"
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
@@ -24,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
"google.golang.org/protobuf/types/known/emptypb"
)

View File

@@ -196,7 +196,7 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
log.WithField("validatorIndex", valIndex).Infof("Participation flag found")
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,

View File

@@ -79,6 +79,7 @@ type Service struct {
credentialError error
connectedRPCClients map[net.Addr]bool
clientConnectionLock sync.Mutex
validatorServer *validatorv1alpha1.Server
}
// Config options for the beacon node RPC server.
@@ -187,6 +188,268 @@ func NewService(ctx context.Context, cfg *Config) *Service {
}
s.grpcServer = grpc.NewServer(opts...)
var stateCache stategen.CachedGetter
if s.cfg.StateGen != nil {
stateCache = s.cfg.StateGen.CombinedCache()
}
withCache := stategen.WithCache(stateCache)
ch := stategen.NewCanonicalHistory(s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, s.cfg.ChainInfoFetcher, withCache)
stater := &lookup.BeaconDbStater{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
}
blocker := &lookup.BeaconDbBlocker{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch}
s.initializeRewardServerRoutes(&rewards.Server{
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
BlockRewardFetcher: rewardFetcher,
})
s.initializeBuilderServerRoutes(&rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
})
s.initializeBlobServerRoutes(&blob.Server{
Blocker: blocker,
})
coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
GenesisFetcher: s.cfg.GenesisFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.ExecutionChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
CoreService: coreService,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
}
s.validatorServer = validatorServer
s.initializeValidatorServerRoutes(&validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
V1Alpha1Server: validatorServer,
Stater: stater,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
OperationNotifier: s.cfg.OperationNotifier,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
CoreService: coreService,
BlockRewardFetcher: rewardFetcher,
})
nodeServer := &nodev1alpha1.Server{
LogsStreamer: logs.NewStreamServer(),
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
GenesisFetcher: s.cfg.GenesisFetcher,
POWChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
}
s.initializeNodeServerRoutes(&node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
CanonicalFetcher: s.cfg.CanonicalFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
DepositFetcher: s.cfg.DepositFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
AttestationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
StateGen: s.cfg.StateGen,
SyncChecker: s.cfg.SyncService,
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
CoreService: coreService,
}
s.initializeBeaconServerRoutes(&beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
})
s.initializeConfigRoutes()
s.initializeEventsServerRoutes(&events.Server{
StateNotifier: s.cfg.StateNotifier,
OperationNotifier: s.cfg.OperationNotifier,
HeadFetcher: s.cfg.HeadFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
s.initializeLightClientServerRoutes(&lightclient.Server{
Blocker: blocker,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
})
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
if s.cfg.EnableDebugRPCEndpoints {
log.Info("Enabled debug gRPC endpoints")
debugServer := &debugv1alpha1.Server{
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BeaconDB: s.cfg.BeaconDB,
StateGen: s.cfg.StateGen,
HeadFetcher: s.cfg.HeadFetcher,
PeerManager: s.cfg.PeerManager,
PeersFetcher: s.cfg.PeersFetcher,
ReplayerBuilder: ch,
}
s.initializeDebugServerRoutes(&debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
Stater: stater,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
}
ethpbv1alpha1.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
// Register reflection service on gRPC server.
reflection.Register(s.grpcServer)
s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{
SyncChecker: s.cfg.SyncService,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
Stater: stater,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
})
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{CoreService: coreService})
return s
}
@@ -226,6 +489,7 @@ func (s *Service) initializeValidatorServerRoutes(validatorServer *validator.Ser
s.cfg.Router.HandleFunc("/eth/v1/validator/blinded_blocks/{slot}", validatorServer.ProduceBlindedBlock).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v3/validator/blocks/{slot}", validatorServer.ProduceBlockV3).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_selections", validatorServer.BeaconCommitteeSelections).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_selections", validatorServer.SyncCommitteeSelections).Methods(http.MethodPost)
}
func (s *Service) initializeNodeServerRoutes(nodeServer *node.Server) {
@@ -320,270 +584,7 @@ func (s *Service) initializePrysmValidatorServerRoutes(validatorServerPrysm *val
// Start the gRPC server.
func (s *Service) Start() {
grpcprometheus.EnableHandlingTimeHistogram()
var stateCache stategen.CachedGetter
if s.cfg.StateGen != nil {
stateCache = s.cfg.StateGen.CombinedCache()
}
withCache := stategen.WithCache(stateCache)
ch := stategen.NewCanonicalHistory(s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, s.cfg.ChainInfoFetcher, withCache)
stater := &lookup.BeaconDbStater{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
}
blocker := &lookup.BeaconDbBlocker{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch}
s.initializeRewardServerRoutes(&rewards.Server{
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
BlockRewardFetcher: rewardFetcher,
})
s.initializeBuilderServerRoutes(&rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
})
s.initializeBlobServerRoutes(&blob.Server{
Blocker: blocker,
})
coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
GenesisFetcher: s.cfg.GenesisFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.ExecutionChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
CoreService: coreService,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
}
s.initializeValidatorServerRoutes(&validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
V1Alpha1Server: validatorServer,
Stater: stater,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
OperationNotifier: s.cfg.OperationNotifier,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
CoreService: coreService,
BlockRewardFetcher: rewardFetcher,
})
nodeServer := &nodev1alpha1.Server{
LogsStreamer: logs.NewStreamServer(),
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
GenesisFetcher: s.cfg.GenesisFetcher,
POWChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
}
s.initializeNodeServerRoutes(&node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
CanonicalFetcher: s.cfg.CanonicalFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
DepositFetcher: s.cfg.DepositFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
AttestationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
StateGen: s.cfg.StateGen,
SyncChecker: s.cfg.SyncService,
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
CoreService: coreService,
}
s.initializeBeaconServerRoutes(&beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
})
s.initializeConfigRoutes()
s.initializeEventsServerRoutes(&events.Server{
StateNotifier: s.cfg.StateNotifier,
OperationNotifier: s.cfg.OperationNotifier,
HeadFetcher: s.cfg.HeadFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
s.initializeLightClientServerRoutes(&lightclient.Server{
Blocker: blocker,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
})
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
if s.cfg.EnableDebugRPCEndpoints {
log.Info("Enabled debug gRPC endpoints")
debugServer := &debugv1alpha1.Server{
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BeaconDB: s.cfg.BeaconDB,
StateGen: s.cfg.StateGen,
HeadFetcher: s.cfg.HeadFetcher,
PeerManager: s.cfg.PeerManager,
PeersFetcher: s.cfg.PeersFetcher,
ReplayerBuilder: ch,
}
s.initializeDebugServerRoutes(&debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
Stater: stater,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
}
ethpbv1alpha1.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
// Register reflection service on gRPC server.
reflection.Register(s.grpcServer)
validatorServer.PruneBlobsBundleCacheRoutine()
s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{
SyncChecker: s.cfg.SyncService,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
Stater: stater,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
})
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{CoreService: coreService})
s.validatorServer.PruneBlobsBundleCacheRoutine()
go func() {
if s.listener != nil {
if err := s.grpcServer.Serve(s.listener); err != nil {

View File

@@ -152,11 +152,11 @@ func TestServer_InitializeRoutes(t *testing.T) {
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/beacon_committee_selections": {http.MethodPost},
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
//"/eth/v1/validator/sync_committee_selections": {http.MethodPost}, // not implemented
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
"/eth/v1/validator/sync_committee_selections": {http.MethodPost},
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
}
prysmCustomRoutes := map[string][]string{

View File

@@ -4,14 +4,12 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -20,8 +18,6 @@ import (
func (s *Service) checkSlashableAttestations(
ctx context.Context, currentEpoch primitives.Epoch, atts []*slashertypes.IndexedAttestationWrapper,
) (map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing, error) {
start := time.Now()
slashings := map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing{}
// Double votes
@@ -30,8 +26,6 @@ func (s *Service) checkSlashableAttestations(
return nil, errors.Wrap(err, "could not check slashable double votes")
}
log.WithField("elapsed", time.Since(start)).Debug("Done checking double votes")
for root, slashing := range doubleVoteSlashings {
slashings[root] = slashing
}
@@ -53,19 +47,6 @@ func (s *Service) checkSlashableAttestations(
slashings[root] = slashing
}
elapsed := time.Since(start)
fields := logrus.Fields{
"numAttestations": len(atts),
"elapsed": elapsed,
}
log.WithFields(fields).Info("Done checking slashable attestations")
if len(slashings) > 0 {
log.WithField("numSlashings", len(slashings)).Warn("Slashable attestation offenses found")
}
return slashings, nil
}
@@ -75,10 +56,21 @@ func (s *Service) checkSurroundVotes(
attWrappers []*slashertypes.IndexedAttestationWrapper,
currentEpoch primitives.Epoch,
) (map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing, error) {
// With 256 validators and 16 epochs per chunk, there are 4096 `uint16` elements per chunk.
// 4096 `uint16` elements = 8192 bytes = 8KB
// 25_600 chunks * 8KB = 200MB
const maxChunkBeforeFlush = 25_600
slashings := map[[fieldparams.RootLength]byte]*ethpb.AttesterSlashing{}
// Group attestation wrappers by validator chunk index.
attWrappersByValidatorChunkIndex := s.groupByValidatorChunkIndex(attWrappers)
attWrappersByValidatorChunkIndexCount := len(attWrappersByValidatorChunkIndex)
minChunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
maxChunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
chunksCounts := 0
for validatorChunkIndex, attWrappers := range attWrappersByValidatorChunkIndex {
minChunkByChunkIndex, err := s.updatedChunkByChunkIndex(ctx, slashertypes.MinSpan, currentEpoch, validatorChunkIndex)
@@ -91,6 +83,8 @@ func (s *Service) checkSurroundVotes(
return nil, errors.Wrap(err, "could not update updatedMaxChunks")
}
chunksCounts += len(minChunkByChunkIndex) + len(maxChunkByChunkIndex)
// Group (already grouped by validator chunk index) attestation wrappers by chunk index.
attWrappersByChunkIndex := s.groupByChunkIndex(attWrappers)
@@ -114,20 +108,42 @@ func (s *Service) checkSurroundVotes(
slashings[root] = slashing
}
// Save updated chunks into the database.
if err := s.saveUpdatedChunks(ctx, minChunkByChunkIndex, slashertypes.MinSpan, validatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save chunks for min spans")
// Memoize the updated chunks for the current validator chunk index.
minChunkByChunkIndexByValidatorChunkIndex[validatorChunkIndex] = minChunkByChunkIndex
maxChunkByChunkIndexByValidatorChunkIndex[validatorChunkIndex] = maxChunkByChunkIndex
if chunksCounts >= maxChunkBeforeFlush {
// Save the updated chunks to disk if we have reached the maximum number of chunks to store in memory.
if err := s.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkByChunkIndexByValidatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save updated min chunks to disk")
}
if err := s.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkByChunkIndexByValidatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save updated max chunks to disk")
}
// Reset the chunks counts.
chunksCounts = 0
// Reset memoized chunks.
minChunkByChunkIndexByValidatorChunkIndex = make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
maxChunkByChunkIndexByValidatorChunkIndex = make(map[uint64]map[uint64]Chunker, attWrappersByValidatorChunkIndexCount)
}
if err := s.saveUpdatedChunks(ctx, maxChunkByChunkIndex, slashertypes.MaxSpan, validatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save chunks for max spans")
// Update the latest updated epoch to the current epoch for all validators in the current validator chunk.
indexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
for _, index := range indexes {
s.latestEpochUpdatedForValidator[index] = currentEpoch
}
}
// Update the latest written epoch for all validators involved to the current chunk.
indices := s.params.validatorIndexesInChunk(validatorChunkIndex)
for _, idx := range indices {
s.latestEpochWrittenForValidator[idx] = currentEpoch
}
// Save the updated chunks to disk.
if err := s.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkByChunkIndexByValidatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save updated min chunks to disk")
}
if err := s.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkByChunkIndexByValidatorChunkIndex); err != nil {
return nil, errors.Wrap(err, "could not save updated max chunks to disk")
}
return slashings, nil
@@ -239,7 +255,7 @@ func (s *Service) checkDoubleVotes(
// updatedChunkByChunkIndex loads the chunks from the database for validators corresponding to
// the `validatorChunkIndex`.
// It then updates the chunks with the neutral element for corresponding validators from
// the epoch just after the latest epoch written to the current epoch.
// the epoch just after the latest updated epoch to the current epoch.
// A mapping between chunk index and chunk is returned to the caller.
func (s *Service) updatedChunkByChunkIndex(
ctx context.Context,
@@ -247,56 +263,97 @@ func (s *Service) updatedChunkByChunkIndex(
currentEpoch primitives.Epoch,
validatorChunkIndex uint64,
) (map[uint64]Chunker, error) {
chunkByChunkIndex := map[uint64]Chunker{}
// Every validator may have a first epoch to update.
// For a given validator,
// - If it has no latest updated epoch, then the first epoch to update is set to 0.
// - If the latest updated epoch is the current epoch, then there is no epoch to update,
// and thus no first epoch to update.
// - In all other cases, the first epoch to update is the latest updated epoch + 1.
// minFirstEpochToUpdate is set to the smallest first epoch to update for all validators in the chunk
// corresponding to the `validatorChunkIndex`.
var minFirstEpochToUpdate *primitives.Epoch
neededChunkIndexesMap := map[uint64]bool{}
validatorIndexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
for _, validatorIndex := range validatorIndexes {
// Retrieve the latest epoch written for the validator.
latestEpochWritten, ok := s.latestEpochWrittenForValidator[validatorIndex]
// Start from the epoch just after the latest epoch written.
epochToWrite, err := latestEpochWritten.SafeAdd(1)
// Retrieve the first epoch to update for the validator index.
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
if err != nil {
return nil, errors.Wrap(err, "could not add 1 to latest epoch written")
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
}
if !ok {
epochToWrite = 0
if !isAnEpochToUpdate {
// If there is no epoch to update, skip.
continue
}
// It is useless to update more than `historyLength` epochs, since
// the chunks are circular and we will be overwritten at least one.
if currentEpoch-epochToWrite >= s.params.historyLength {
epochToWrite = currentEpoch + 1 - s.params.historyLength
// If, for this validator index, the chunks corresponding to the first epoch to update
// (and to all following epochs up to the current epoch) are already flagged as needed,
// skip.
if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
continue
}
for epochToWrite <= currentEpoch {
// Get the chunk index for the latest epoch written.
chunkIndex := s.params.chunkIndex(epochToWrite)
minFirstEpochToUpdate = &firstEpochToUpdate
// Add new needed chunk indexes to the map.
for i := firstEpochToUpdate; i <= currentEpoch; i++ {
chunkIndex := s.params.chunkIndex(i)
neededChunkIndexesMap[chunkIndex] = true
}
}
// Get the list of needed chunk indexes.
neededChunkIndexes := make([]uint64, 0, len(neededChunkIndexesMap))
for chunkIndex := range neededChunkIndexesMap {
neededChunkIndexes = append(neededChunkIndexes, chunkIndex)
}
// Retrieve needed chunks from the database.
chunkByChunkIndex, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, neededChunkIndexes)
if err != nil {
return nil, errors.Wrap(err, "could not load chunks from disk")
}
for _, validatorIndex := range validatorIndexes {
// Retrieve the first epoch to update for the validator index.
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
}
if !isAnEpochToUpdate {
// If there is no epoch to update, skip.
continue
}
epochToUpdate := firstEpochToUpdate
for epochToUpdate <= currentEpoch {
// Get the chunk index for the epoch to update.
chunkIndex := s.params.chunkIndex(epochToUpdate)
// Get the chunk corresponding to the chunk index from the `chunkByChunkIndex` map.
currentChunk, ok := chunkByChunkIndex[chunkIndex]
if !ok {
// If the chunk is not in the map, retrieve it from the database.
currentChunk, err = s.getChunkFromDatabase(ctx, chunkKind, validatorChunkIndex, chunkIndex)
if err != nil {
return nil, errors.Wrap(err, "could not get chunk")
}
return nil, errors.Errorf("chunk at index %d does not exist", chunkIndex)
}
// Update the current chunk with the neutral element for the validator index for the latest epoch written.
for s.params.chunkIndex(epochToWrite) == chunkIndex && epochToWrite <= currentEpoch {
// Update the current chunk with the neutral element for the validator index for the epoch to update.
for s.params.chunkIndex(epochToUpdate) == chunkIndex && epochToUpdate <= currentEpoch {
if err := setChunkRawDistance(
s.params,
currentChunk.Chunk(),
validatorIndex,
epochToWrite,
epochToUpdate,
currentChunk.NeutralElement(),
); err != nil {
return nil, err
}
epochToWrite++
epochToUpdate++
}
chunkByChunkIndex[chunkIndex] = currentChunk
@@ -306,6 +363,40 @@ func (s *Service) updatedChunkByChunkIndex(
return chunkByChunkIndex, nil
}
// firstEpochToUpdate, given a validator index and the current epoch, returns a boolean indicating
// whether there is an epoch to update and, if so, the first epoch to update.
func (s *Service) firstEpochToUpdate(validatorIndex primitives.ValidatorIndex, currentEpoch primitives.Epoch) (bool, primitives.Epoch, error) {
latestEpochUpdated, ok := s.latestEpochUpdatedForValidator[validatorIndex]
// Start from the epoch just after the latest updated epoch.
epochToUpdate, err := latestEpochUpdated.SafeAdd(1)
if err != nil {
return false, primitives.Epoch(0), errors.Wrap(err, "could not add 1 to latest updated epoch")
}
if !ok {
epochToUpdate = 0
}
if latestEpochUpdated == currentEpoch {
// If the latest updated epoch is the current epoch, we do not need to update anything.
return false, primitives.Epoch(0), nil
}
// Latest updated epoch should not be greater than the current epoch.
if latestEpochUpdated > currentEpoch {
return false, primitives.Epoch(0), errors.Errorf("epoch to write `%d` should not be greater than the current epoch `%d`", epochToUpdate, currentEpoch)
}
// It is useless to update more than `historyLength` epochs, since
// the chunks are circular and older epochs would be overwritten at least once.
if currentEpoch-epochToUpdate >= s.params.historyLength {
epochToUpdate = currentEpoch + 1 - s.params.historyLength
}
return true, epochToUpdate, nil
}
// Updates spans and detects any slashable attester offenses along the way.
// 1. Determine the chunks we need to use for updating for the validator indices
// in a validator chunk index, then retrieve those chunks from the database.
@@ -487,7 +578,7 @@ func (s *Service) getChunkFromDatabase(
chunkIndex uint64,
) (Chunker, error) {
// We can ensure we load the appropriate chunk we need by fetching from the DB.
diskChunks, err := s.loadChunks(ctx, validatorChunkIndex, chunkKind, []uint64{chunkIndex})
diskChunks, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, []uint64{chunkIndex})
if err != nil {
return nil, errors.Wrapf(err, "could not load chunk at index %d", chunkIndex)
}
@@ -502,7 +593,7 @@ func (s *Service) getChunkFromDatabase(
// Load chunks for a specified list of chunk indices. We attempt to load them from the database.
// If the data exists, then we initialize a chunk of a specified kind. Otherwise, we create
// an empty chunk, add it to our map, and then return it to the caller.
func (s *Service) loadChunks(
func (s *Service) loadChunksFromDisk(
ctx context.Context,
validatorChunkIndex uint64,
chunkKind slashertypes.ChunkKind,
@@ -511,17 +602,36 @@ func (s *Service) loadChunks(
ctx, span := trace.StartSpan(ctx, "Slasher.loadChunks")
defer span.End()
chunkKeys := make([][]byte, 0, len(chunkIndexes))
for _, chunkIndex := range chunkIndexes {
chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIndex))
chunksCount := len(chunkIndexes)
if chunksCount == 0 {
return map[uint64]Chunker{}, nil
}
// Build chunk keys.
chunkKeys := make([][]byte, 0, chunksCount)
for _, chunkIndex := range chunkIndexes {
chunkKey := s.params.flatSliceID(validatorChunkIndex, chunkIndex)
chunkKeys = append(chunkKeys, chunkKey)
}
// Load the chunks from the database.
rawChunks, chunksExist, err := s.serviceCfg.Database.LoadSlasherChunks(ctx, chunkKind, chunkKeys)
if err != nil {
return nil, errors.Wrapf(err, "could not load slasher chunk index")
}
chunksByChunkIdx := make(map[uint64]Chunker, len(rawChunks))
// Perform basic checks.
if len(rawChunks) != chunksCount {
return nil, errors.Errorf("expected %d chunks, got %d", chunksCount, len(rawChunks))
}
if len(chunksExist) != chunksCount {
return nil, errors.Errorf("expected %d chunks exist, got %d", chunksCount, len(chunksExist))
}
// Initialize the chunks.
chunksByChunkIdx := make(map[uint64]Chunker, chunksCount)
for i := 0; i < len(rawChunks); i++ {
// If the chunk exists in the database, we initialize it from the raw bytes data.
// If it does not exist, we initialize an empty chunk.
@@ -558,21 +668,35 @@ func (s *Service) loadChunks(
return chunksByChunkIdx, nil
}
// Saves updated chunks to disk given the required database schema.
func (s *Service) saveUpdatedChunks(
func (s *Service) saveChunksToDisk(
ctx context.Context,
updatedChunksByChunkIdx map[uint64]Chunker,
chunkKind slashertypes.ChunkKind,
validatorChunkIndex uint64,
chunkByChunkIndexByValidatorChunkIndex map[uint64]map[uint64]Chunker,
) error {
ctx, span := trace.StartSpan(ctx, "Slasher.saveUpdatedChunks")
ctx, span := trace.StartSpan(ctx, "Slasher.saveChunksToDisk")
defer span.End()
chunkKeys := make([][]byte, 0, len(updatedChunksByChunkIdx))
chunks := make([][]uint16, 0, len(updatedChunksByChunkIdx))
for chunkIdx, chunk := range updatedChunksByChunkIdx {
chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIdx))
chunks = append(chunks, chunk.Chunk())
// Compute the total number of chunks to save.
chunksCount := 0
for _, chunkByChunkIndex := range chunkByChunkIndexByValidatorChunkIndex {
chunksCount += len(chunkByChunkIndex)
}
chunksSavedTotal.Add(float64(len(chunks)))
// Create needed arrays.
chunkKeys := make([][]byte, 0, chunksCount)
chunks := make([][]uint16, 0, chunksCount)
// Fill the arrays.
for validatorChunkIndex, chunkByChunkIndex := range chunkByChunkIndexByValidatorChunkIndex {
for chunkIndex, chunk := range chunkByChunkIndex {
chunkKeys = append(chunkKeys, s.params.flatSliceID(validatorChunkIndex, chunkIndex))
chunks = append(chunks, chunk.Chunk())
}
}
// Update prometheus metrics.
chunksSavedTotal.Add(float64(chunksCount))
// Save the chunks to disk.
return s.serviceCfg.Database.SaveSlasherChunks(ctx, chunkKind, chunkKeys, chunks)
}
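
For readers skimming the new batching logic above, the following is a minimal, self-contained sketch of the memoize-then-flush pattern used in checkSurroundVotes: updated chunks are accumulated per validator chunk index and written out whenever the in-memory count crosses a threshold (25_600 chunks, roughly 200MB, in the diff). The Chunk type and flushToDisk function below are hypothetical stand-ins for the slasher's Chunker and Database.SaveSlasherChunks, not the actual Prysm code.

package main

import "fmt"

// Chunk stands in for a slasher span chunk (4096 uint16 values, about 8KB).
type Chunk []uint16

// maxChunksBeforeFlush mirrors the cap used in the diff: 25_600 chunks * 8KB is about 200MB.
const maxChunksBeforeFlush = 25_600

// flushToDisk is a hypothetical stand-in for Database.SaveSlasherChunks.
func flushToDisk(byValidatorChunk map[uint64]map[uint64]Chunk) error {
	total := 0
	for _, byChunk := range byValidatorChunk {
		total += len(byChunk)
	}
	fmt.Printf("flushing %d chunks\n", total)
	return nil
}

// accumulateAndFlush memoizes updated chunks per validator chunk index and
// flushes them whenever the in-memory count reaches the threshold, then resets
// the batch, mirroring the flow of the new checkSurroundVotes loop.
func accumulateAndFlush(updates map[uint64]map[uint64]Chunk) error {
	memo := make(map[uint64]map[uint64]Chunk, len(updates))
	count := 0
	for validatorChunkIdx, byChunk := range updates {
		memo[validatorChunkIdx] = byChunk
		count += len(byChunk)
		if count >= maxChunksBeforeFlush {
			if err := flushToDisk(memo); err != nil {
				return err
			}
			// Reset the in-memory batch after a flush.
			memo = make(map[uint64]map[uint64]Chunk, len(updates))
			count = 0
		}
	}
	// Flush whatever remains at the end of the loop.
	return flushToDisk(memo)
}

func main() {
	updates := map[uint64]map[uint64]Chunk{
		0: {13: make(Chunk, 4096)},
		1: {13: make(Chunk, 4096)},
	}
	if err := accumulateAndFlush(updates); err != nil {
		fmt.Println("error:", err)
	}
}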

View File

@@ -3,6 +3,7 @@ package slasher
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
@@ -833,7 +834,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) {
require.LogsDoNotContain(t, hook, "Could not detect")
}
func Test_epochUpdateForValidators(t *testing.T) {
func Test_updatedChunkByChunkIndex(t *testing.T) {
neutralMin, neutralMax := uint16(65535), uint16(0)
testCases := []struct {
@@ -1066,7 +1067,7 @@ func Test_epochUpdateForValidators(t *testing.T) {
historyLength: tt.historyLength,
},
serviceCfg: &ServiceConfig{Database: slasherDB},
latestEpochWrittenForValidator: tt.latestUpdatedEpochByValidatorIndex,
latestEpochUpdatedForValidator: tt.latestUpdatedEpochByValidatorIndex,
}
// Save min initial chunks if they exist.
@@ -1076,7 +1077,11 @@ func Test_epochUpdateForValidators(t *testing.T) {
minChunkerByChunkerIndex[chunkIndex] = &MinSpanChunksSlice{data: minChunk}
}
err := service.saveUpdatedChunks(ctx, minChunkerByChunkerIndex, slashertypes.MinSpan, tt.validatorChunkIndex)
minChunkerByChunkerIndexByValidatorChunkerIndex := map[uint64]map[uint64]Chunker{
tt.validatorChunkIndex: minChunkerByChunkerIndex,
}
err := service.saveChunksToDisk(ctx, slashertypes.MinSpan, minChunkerByChunkerIndexByValidatorChunkerIndex)
require.NoError(t, err)
}
@@ -1087,7 +1092,11 @@ func Test_epochUpdateForValidators(t *testing.T) {
maxChunkerByChunkerIndex[chunkIndex] = &MaxSpanChunksSlice{data: maxChunk}
}
err := service.saveUpdatedChunks(ctx, maxChunkerByChunkerIndex, slashertypes.MaxSpan, tt.validatorChunkIndex)
maxChunkerByChunkerIndexByValidatorChunkerIndex := map[uint64]map[uint64]Chunker{
tt.validatorChunkIndex: maxChunkerByChunkerIndex,
}
err := service.saveChunksToDisk(ctx, slashertypes.MaxSpan, maxChunkerByChunkerIndexByValidatorChunkerIndex)
require.NoError(t, err)
}
@@ -1269,7 +1278,7 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) {
emptyChunk = EmptyMaxSpanChunksSlice(defaultParams)
}
chunkIdx := uint64(2)
received, err := s.loadChunks(ctx, 0, kind, []uint64{chunkIdx})
received, err := s.loadChunksFromDisk(ctx, 0, kind, []uint64{chunkIdx})
require.NoError(t, err)
wanted := map[uint64]Chunker{
chunkIdx: emptyChunk,
@@ -1301,15 +1310,15 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) {
4: existingChunk,
6: existingChunk,
}
err = s.saveUpdatedChunks(
ctx,
updatedChunks,
kind,
0, // validatorChunkIndex
)
chunkByChunkIndexByValidatorChunkIndex := map[uint64]map[uint64]Chunker{
0: updatedChunks,
}
err = s.saveChunksToDisk(ctx, kind, chunkByChunkIndexByValidatorChunkIndex)
require.NoError(t, err)
// Check if the retrieved chunks match what we just saved to disk.
received, err = s.loadChunks(ctx, 0, kind, []uint64{2, 4, 6})
received, err = s.loadChunksFromDisk(ctx, 0, kind, []uint64{2, 4, 6})
require.NoError(t, err)
require.DeepEqual(t, updatedChunks, received)
}
@@ -1351,7 +1360,54 @@ func TestService_processQueuedAttestations(t *testing.T) {
tickerChan <- 1
cancel()
s.wg.Wait()
assert.LogsContain(t, hook, "Processing queued")
assert.LogsContain(t, hook, "Start processing queued attestations")
assert.LogsContain(t, hook, "Done processing queued attestations")
}
func Benchmark_saveChunksToDisk(b *testing.B) {
// Define the parameters.
const (
chunkKind = slashertypes.MinSpan
validatorsChunksCount = 6000 // Corresponds to 1_536_000 validators / 256 validators per chunk
chunkIndex uint64 = 13
validatorChunkIndex uint64 = 42
)
params := DefaultParams()
// Get a context.
ctx := context.Background()
chunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, validatorsChunksCount)
// Populate the chunkers.
for i := 0; i < validatorsChunksCount; i++ {
data := make([]uint16, params.chunkSize)
for j := 0; j < int(params.chunkSize); j++ {
data[j] = uint16(rand.Intn(1 << 16))
}
chunker := map[uint64]Chunker{chunkIndex: &MinSpanChunksSlice{params: params, data: data}}
chunkByChunkIndexByValidatorChunkIndex[uint64(i)] = chunker
}
// Initialize the slasher database.
slasherDB := dbtest.SetupSlasherDB(b)
// Initialize the slasher service.
service, err := New(ctx, &ServiceConfig{Database: slasherDB})
require.NoError(b, err)
// Reset the benchmark timer.
b.ResetTimer()
// Run the benchmark.
for i := 0; i < b.N; i++ {
b.StartTimer()
err = service.saveChunksToDisk(ctx, slashertypes.MinSpan, chunkByChunkIndexByValidatorChunkIndex)
b.StopTimer()
require.NoError(b, err)
}
}
func BenchmarkCheckSlashableAttestations(b *testing.B) {
@@ -1444,6 +1500,66 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
}
}
func Benchmark_checkSurroundVotes(b *testing.B) {
const (
// Approximately the number of Holesky active validators on 2024-02-16.
// This number is a multiple of both 32 (the number of slots per epoch) and 256 (the number of validators per chunk).
validatorsCount = 1_638_400
slotsPerEpoch = 32
targetEpoch = 42
sourceEpoch = 43
currentEpoch = 43
)
// Create a context.
ctx := context.Background()
// Initialize the slasher database.
slasherDB := dbtest.SetupSlasherDB(b)
// Initialize the slasher service.
service, err := New(ctx, &ServiceConfig{Database: slasherDB})
require.NoError(b, err)
// Create the attesting validator indexes.
// The best case scenario would be to have all validators attesting for a slot with contiguous indexes.
// So for 1_638_400 validators with 32 slots per epoch, we would have 51_200 attesting validators per slot.
// With 256 validators per chunk, we would have only 200 modified chunks.
//
// In this benchmark, we use the worst case scenario, where attesting validators are evenly split across all validator chunks.
// We also suppose that only one chunk per validator chunk index is modified.
// For one given validator index, multiple chunk indexes could be modified.
//
// With 1_638_400 validators, we have 6_400 validator chunks. If exactly 8 validators per chunk attest, we have:
// 6_400 chunks * 8 = 51_200 validators attesting per slot. And 51_200 validators * 32 slots = 1_638_400
// attesting validators per epoch.
// ==> Attesting validator indexes will be computed as follows:
// validator chunk index 0 validator chunk index 1 validator chunk index 6_399
// [0, 32, 64, 96, 128, 160, 192, 224 | 256, 288, 320, 352, 384, 416, 448, 480 | ... | ..., 1_638_304, 1_638_336, 1_638_368]
//
attestingValidatorsCount := validatorsCount / slotsPerEpoch
validatorIndexes := make([]uint64, attestingValidatorsCount)
for i := 0; i < attestingValidatorsCount; i++ {
validatorIndexes[i] = 32 * uint64(i)
}
// Create the attestation wrapper.
// This benchmark assumes that all validators produced the exact same head, source and target votes.
attWrapper := createAttestationWrapperEmptySig(b, sourceEpoch, targetEpoch, validatorIndexes, nil)
attWrappers := []*slashertypes.IndexedAttestationWrapper{attWrapper}
// Run the benchmark.
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StartTimer()
_, err = service.checkSurroundVotes(ctx, attWrappers, currentEpoch)
b.StopTimer()
require.NoError(b, err)
}
}
// createAttestationWrapperEmptySig creates an attestation wrapper with the given source and target epochs,
// for the validators with the given indices, and with a beacon block root corresponding to the head vote.
// For the source and target epochs, the corresponding root is null.

View File

@@ -1,45 +1,142 @@
// nolint:dupword
//
// Package slasher defines an optimized implementation of Ethereum proof-of-stake slashing
// detection, namely focused on catching "surround vote" slashable
// offenses as explained here: https://blog.ethereum.org/2020/01/13/validated-staking-on-eth2-1-incentives/.
//
// Surround vote detection is a difficult problem if done naively, as slasher
// needs to keep track of every single attestation by every single validator
// in the network and be ready to efficiently detect whether incoming attestations
// are slashable with respect to older ones. To do this, the Sigma Prime team
// created an elaborate design document: https://hackmd.io/@sproul/min-max-slasher
// offering an optimal solution.
//
// Attesting histories are kept for each validator in two separate arrays known
// as min and max spans, which are explained in our design document:
// https://hackmd.io/@prysmaticlabs/slasher.
//
// A regular pair of min and max spans for a validator look as follows
// with length = H where H is the amount of epochs worth of history
// we want to persist for slashing detection.
//
// validator_1_min_span = [2, 2, 2, ..., 2]
// validator_1_max_span = [0, 0, 0, ..., 0]
//
// Instead of always dealing with length H arrays, which can be prohibitively
// expensive to handle in memory, we split these arrays into chunks of length C.
// For C = 3, for example, the 0th chunk of validator 1's min and max spans would look
// as follows:
//
// validator_1_min_span_chunk_0 = [2, 2, 2]
// validator_1_max_span_chunk_0 = [2, 2, 2]
//
// Next, on disk, we take chunks for K validators, and store them as flat slices.
// For example, if H = 3, C = 3, and K = 3, then we can store 3 validators' chunks as a flat
// slice as follows:
//
// val0 val1 val2
// | | |
// { } { } { }
// [2, 2, 2, 2, 2, 2, 2, 2, 2]
//
// This is known as 2D chunking, pioneered by the Sigma Prime team here:
// https://hackmd.io/@sproul/min-max-slasher. The parameters H, C, and K will be
// used extensively throughout this package.
/*
Package slasher defines an optimized implementation of Ethereum proof-of-stake slashing
detection, namely focused on catching "surround vote" slashable
offenses as explained here: https://blog.ethereum.org/2020/01/13/validated-staking-on-eth2-1-incentives/.
Surround vote detection is a difficult problem if done naively, as slasher
needs to keep track of every single attestation by every single validator
in the network and be ready to efficiently detect whether incoming attestations
are slashable with respect to older ones. To do this, the Sigma Prime team
created an elaborate design document: https://hackmd.io/@sproul/min-max-slasher
offering an optimal solution.
Attesting histories are kept for each validator in two separate arrays known
as min and max spans, which are explained in our design document:
https://hackmd.io/@prysmaticlabs/slasher.
This is known as 2D chunking, pioneered by the Sigma Prime team here:
https://hackmd.io/@sproul/min-max-slasher. The parameters H, C, and K will be
used extensively throughout this package.
Attestations are represented as follows: `<source epoch>====><target epoch>`
N: Number of epochs worth of history we want to keep for each validator.
In the following example:
- N = 4096
- Validators 257 and 258 have some attestations
- All other validators have no attestations
For MIN SPAN, `∞` is actually set to the max `uint16` value: 65535
validator 257 : 8193=======>8195 8196=>8197=============>8200 8204=>8205=>8206=>8207=========>8209=>8210=>8211=>8212=>8213=>8214 8219=>8220 8221=>8222
validator 258 : 8193=======>8196=>8197=>8198=>8199=>8200=>8201=======>8203=>8204=>8205=>8206=>8207===>8208=>8209=>8210=>8211=>8212=>8213=>8214=>8215=>8216=>8217=>8218=>8219=>8220=>8221
/----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\
| MIN SPAN | kN+0 kN+1 kN+2 kN+3 kN+4 kN+5 kN+6 kN+7 kN+8 kN+9 kN+10 kN+11 kN+12 kN+13 kN+14 kN+15 | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 | ... | (k+1)N-16 (k+1)N-15 (k+1)N-14 (k+1)N-13 (k+1)N-12 (k+1)N-11 (k+1)N-10 (k+1)N-9 (k+1)N-8 (k+1)N-7 (k+1)N-6 (k+1)N-5 (k+1)N-4 (k+1)N-3 (k+1)N-2 (k+1)N-1 |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 0 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 1 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 2 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 254 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 255 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 256 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 257 | 3 4 3 2 4 8 7 6 5 4 3 2 2 2 3 3 | 2 2 2 2 2 7 6 5 4 3 2 3 2 ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 258 | 4 3 3 2 2 2 2 2 3 3 2 2 2 2 2 2 | 2 2 2 2 2 2 2 2 2 2 2 2 ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 510 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 511 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator M - 256 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator M - 255 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator M - 1 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ | ... | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
\----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------/
/----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\
| MAX SPAN | kN+0 kN+1 kN+2 kN+3 kN+4 kN+5 kN+6 kN+7 kN+8 kN+9 kN+10 kN+11 kN+12 kN+13 kN+14 kN+15 | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 | ... | (k+1)N-16 (k+1)N-15 (k+1)N-14 (k+1)N-13 (k+1)N-12 (k+1)N-11 (k+1)N-10 (k+1)N-9 (k+1)N-8 (k+1)N-7 (k+1)N-6 (k+1)N-5 (k+1)N-4 (k+1)N-3 (k+1)N-2 (k+1)N-1 |
|-------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 1 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 2 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 14 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 15 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator 256 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 257 | 0 0 1 0 0 0 2 1 0 0 0 0 0 0 0 0 | 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 258 | 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator 510 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator 511 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| ------------------+-------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+-----+----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| validator M - 256 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| validator M - 255 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
| ................. | ............................................................................................... | ............................................................................................... | ... | .............................................................................................................................................................. |
| validator M - 1 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ... | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |
\ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------/
How to know if an incoming attestation will surround a pre-existing one?
------------------------------------------------------------------------
Example with an incoming attestation 8197====>8199 for validator 257.
- First, we retrieve the MIN SPAN value for the source epoch of the incoming attestation (here the source epoch is 8197). We get the value 8.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8197 = 2.
- 8 >= 2, so the incoming attestation will NOT surround any pre-existing one.
Example with an incoming attestation 8202====>8206 for validator 257.
- First, we retrieve the MIN SPAN value for the source epoch of the incoming attestation (here the source epoch is 8202). We get the value 3.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8206 - 8202 = 4.
- 3 < 4, so the incoming attestation will surround a pre-existing one. (In this precise case, it will surround 8204=>8205)
How to know if an incoming attestation will be surrounded by a pre-existing one?
--------------------------------------------------------------------------------
Example with an incoming attestation 8197====>8199 for validator 257.
- First, we retrieve the MAX SPAN value for the source epoch of the incoming attestation (here the source epoch is 8197). We get the value 0.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8197 = 2.
- 0 <= 2, so the incoming attestation will NOT be surrounded by any pre-existing one.
Example with an incoming attestation 8198====>8199 for validator 257.
- First, we retrieve the MAX SPAN value for the source epoch of the incoming attestation (here the source epoch is 8198). We get the value 2.
- Then, for the incoming attestation, we compute `target - source`. We get the value 8199 - 8198 = 1.
- 2 > 1, so the incoming attestation will be surrounded by a pre-existing one. (In this precise case, it will be surrounded by 8197=>8200)
Data are stored on disk by chunk.
For example: for MIN SPAN, validators 256 to 511 (inclusive) and epochs 8208 to 8223 (inclusive), the corresponding chunk is:
/---------------------------------------------------------------------------------------------------------------------\
| MIN SPAN | kN+16 kN+17 kN+18 kN+19 kN+20 kN+21 kN+22 kN+23 kN+24 kN+25 kN+26 kN+27 kN+28 kN+29 kN+30 kN+31 |
|-------------------+-------------------------------------------------------------------------------------------------|
| validator 256 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 257 | 2 2 2 2 2 7 6 5 4 3 2 3 2 ∞ ∞ ∞ |
| validator 258 | 2 2 2 2 2 2 2 2 2 2 2 2 ∞ ∞ ∞ ∞ |
| ................. | ............................................................................................... |
| validator 510 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
| validator 511 | ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ ∞ |
\---------------------------------------------------------------------------------------------------------------------/
Chunks are stored in the database as a flat array of bytes.
For this example, the stored value will be:
| validator 256 | validator 257 | validator 258 |...| validator 510 | validator 511 |
[∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,2,2,2,2,2,7,6,5,4,3,2,3,2,∞,∞,∞,2,2,2,2,2,2,2,2,2,2,2,2,∞,∞,∞,∞,...,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞,∞]
A chunk contains 256 validators * 16 epochs = 4096 values.
A chunk value is stored on 2 bytes (uint16).
==> A chunk takes 8192 bytes = 8KB
There are 4096 epochs / 16 epochs per chunk = 256 chunks per batch of 256 validators.
Storing all values of a batch of 256 validators takes 256 * 8KB = 2MB.
With 1_048_576 validators, we have 1_048_576 / 256 = 4096 batches, and need 4096 * 2MB = 8GB.
Storing both MIN and MAX spans for 1_048_576 validators takes 16GB.
Each chunk is stored snappy-compressed in the database.
If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and a MAX SPAN chunk will contain only `0`s.
This will compress very well, and will let us store a lot of data in a small amount of space.
*/
package slasher
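
The worked MIN/MAX span examples above can be condensed into a short sketch. This is not the Prysm chunk machinery; it only applies the two comparison rules stated in this comment to a single validator, using plain maps keyed by source epoch and the neutral elements described above (65535 for MIN SPAN, 0 for MAX SPAN).

package main

import "fmt"

// isSurrounding reports whether an incoming attestation (source, target) would
// surround a pre-existing one: the stored MIN SPAN at the source epoch is
// strictly smaller than the incoming span target - source.
func isSurrounding(minSpan map[uint64]uint16, source, target uint64) bool {
	m, ok := minSpan[source]
	if !ok {
		m = 65535 // neutral element for MIN SPAN
	}
	return uint64(m) < target-source
}

// isSurrounded reports whether an incoming attestation (source, target) would
// be surrounded by a pre-existing one: the stored MAX SPAN at the source epoch
// is strictly greater than the incoming span target - source.
func isSurrounded(maxSpan map[uint64]uint16, source, target uint64) bool {
	return uint64(maxSpan[source]) > target-source // missing entries default to 0, the neutral element
}

func main() {
	// Values for validator 257, read from the tables above.
	minSpan := map[uint64]uint16{8197: 8, 8202: 3}
	maxSpan := map[uint64]uint16{8197: 0, 8198: 2}

	fmt.Println(isSurrounding(minSpan, 8197, 8199)) // false: 8 >= 2
	fmt.Println(isSurrounding(minSpan, 8202, 8206)) // true:  3 < 4
	fmt.Println(isSurrounded(maxSpan, 8197, 8199))  // false: 0 <= 2
	fmt.Println(isSurrounded(maxSpan, 8198, 8199))  // true:  2 > 1
}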

View File

@@ -129,7 +129,7 @@ func (s *Service) processAttestations(
validAttestationsCount := len(validAttestations)
validInFutureAttestationsCount := len(validInFutureAttestations)
// Log useful infrormation
// Log useful information.
log.WithFields(logrus.Fields{
"currentSlot": currentSlot,
"currentEpoch": currentEpoch,
@@ -137,7 +137,9 @@ func (s *Service) processAttestations(
"numDeferredAtts": validInFutureAttestationsCount,
"numDroppedAtts": numDropped,
"attsQueueSize": queuedAttestationsCount,
}).Info("Processing queued attestations for slashing detection")
}).Info("Start processing queued attestations")
start := time.Now()
// Check for attestation slashings (double, surrounding, surrounded votes).
slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
@@ -154,6 +156,13 @@ func (s *Service) processAttestations(
return nil
}
end := time.Since(start)
log.WithField("elapsed", end).Info("Done processing queued attestations")
if len(slashings) > 0 {
log.WithField("numSlashings", len(slashings)).Warn("Slashable attestation offenses found")
}
return processedAttesterSlashings
}

View File

@@ -58,7 +58,7 @@ type Service struct {
attsSlotTicker *slots.SlotTicker
blocksSlotTicker *slots.SlotTicker
pruningSlotTicker *slots.SlotTicker
latestEpochWrittenForValidator map[primitives.ValidatorIndex]primitives.Epoch
latestEpochUpdatedForValidator map[primitives.ValidatorIndex]primitives.Epoch
wg sync.WaitGroup
}
@@ -74,7 +74,7 @@ func New(ctx context.Context, srvCfg *ServiceConfig) (*Service, error) {
blksQueue: newBlocksQueue(),
ctx: ctx,
cancel: cancel,
latestEpochWrittenForValidator: make(map[primitives.ValidatorIndex]primitives.Epoch),
latestEpochUpdatedForValidator: make(map[primitives.ValidatorIndex]primitives.Epoch),
}, nil
}
@@ -111,7 +111,7 @@ func (s *Service) run() {
return
}
for _, item := range epochsByValidator {
s.latestEpochWrittenForValidator[item.ValidatorIndex] = item.Epoch
s.latestEpochUpdatedForValidator[item.ValidatorIndex] = item.Epoch
}
log.WithField("elapsed", time.Since(start)).Info(
"Finished retrieving last epoch written per validator",
@@ -162,7 +162,7 @@ func (s *Service) Stop() error {
defer innerCancel()
log.Info("Flushing last epoch written for each validator to disk, please wait")
if err := s.serviceCfg.Database.SaveLastEpochsWrittenForValidators(
ctx, s.latestEpochWrittenForValidator,
ctx, s.latestEpochUpdatedForValidator,
); err != nil {
log.Error(err)
}

View File

@@ -185,13 +185,14 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
copy(dstFieldTrie[i], layer)
}
return &FieldTrie{
fieldLayers: dstFieldTrie,
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
fieldLayers: dstFieldTrie,
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
isTransferred: f.isTransferred,
}
}

View File

@@ -58,9 +58,9 @@ func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, er
}
if features.Get().EnableExperimentalState {
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validatorsVal(), currentEpoch)
return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValMultiValueSliceReader(b.validatorsMultiValue, b), currentEpoch)
} else {
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validators, currentEpoch)
return stateutil.UnrealizedCheckpointBalances(cp, pp, stateutil.NewValSliceReader(b.validators), currentEpoch)
}
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -198,9 +199,14 @@ func (b *BeaconState) addDirtyIndices(index types.FieldIndex, indices []uint64)
return
}
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
// Remove duplicates first, to check whether the unique indices still fit under the limit.
if totalIndicesLen > indicesLimit {
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
totalIndicesLen = len(b.dirtyIndices[index]) + len(indices)
}
if totalIndicesLen > indicesLimit {
b.rebuildTrie[index] = true
b.dirtyIndices[index] = []uint64{}
b.dirtyIndices[index] = make([]uint64, 0, indicesLimit)
} else {
b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...)
}
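
A small, self-contained sketch of the dedup-before-rebuild behaviour introduced above; the dedup helper and bare limit below are simplified stand-ins for Prysm's slice.SetUint64 and the per-field indicesLimit.

package main

import "fmt"

const indicesLimit = 10 // stand-in for the real per-field limit

// dedup returns the unique values of a uint64 slice; a simplified stand-in for slice.SetUint64.
func dedup(in []uint64) []uint64 {
	seen := make(map[uint64]struct{}, len(in))
	out := make([]uint64, 0, len(in))
	for _, v := range in {
		if _, ok := seen[v]; ok {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}

// addDirtyIndices mirrors the flow in the diff: when appending would cross the
// limit, first deduplicate the existing indices and re-check; only if the total
// still exceeds the limit do we give up on incremental updates and signal a
// full trie rebuild.
func addDirtyIndices(dirty, incoming []uint64) (updated []uint64, rebuild bool) {
	total := len(dirty) + len(incoming)
	if total > indicesLimit {
		dirty = dedup(dirty)
		total = len(dirty) + len(incoming)
	}
	if total > indicesLimit {
		return make([]uint64, 0, indicesLimit), true
	}
	return append(dirty, incoming...), false
}

func main() {
	dirty := []uint64{0, 1, 2, 3, 4, 5, 6, 0, 1, 2} // duplicates push us near the limit
	dirty, rebuild := addDirtyIndices(dirty, []uint64{7, 8})
	fmt.Println(len(dirty), rebuild) // 9 false: deduplication kept us under the limit
	_, rebuild = addDirtyIndices(dirty, []uint64{9, 10, 11, 12})
	fmt.Println(rebuild) // true: even after deduplication the limit is exceeded
}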

View File

@@ -416,6 +416,26 @@ func TestCopyAllTries(t *testing.T) {
assert.NotEqual(t, rt, newRt)
}
func TestDuplicateDirtyIndices(t *testing.T) {
newState := &BeaconState{
rebuildTrie: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
}
for i := uint64(0); i < indicesLimit-5; i++ {
newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], i)
}
// Append duplicates
newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], []uint64{0, 1, 2, 3, 4}...)
// We would remove the duplicates and stay under the threshold
newState.addDirtyIndices(types.Balances, []uint64{9997, 9998})
assert.Equal(t, false, newState.rebuildTrie[types.Balances])
// This addition pushes us above the threshold and triggers a trie rebuild.
newState.addDirtyIndices(types.Balances, []uint64{10000, 10001, 10002, 10003})
assert.Equal(t, true, newState.rebuildTrie[types.Balances])
}
func generateState(t *testing.T) state.BeaconState {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)

View File

@@ -1158,8 +1158,16 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
}
if fTrie.FieldReference().Refs() > 1 {
var newTrie *fieldtrie.FieldTrie
// We choose to only copy the validator
// trie as it is pretty expensive to regenerate
// in the event of late blocks.
if index == types.Validators {
newTrie = fTrie.CopyTrie()
} else {
newTrie = fTrie.TransferTrie()
}
fTrie.FieldReference().MinusRef()
newTrie := fTrie.TransferTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
}
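
The copy-versus-transfer decision above boils down to a reference-count check. Here is a minimal sketch of that reasoning only; the trie type, its fields and its methods are hypothetical simplifications, not Prysm's FieldTrie API.

package main

import "fmt"

// trie is a hypothetical stand-in for a field trie with a reference counter.
type trie struct {
	refs   int
	layers [][]byte
}

// copyTrie duplicates the layers so both holders keep a usable trie.
func (t *trie) copyTrie() *trie {
	dst := make([][]byte, len(t.layers))
	for i, l := range t.layers {
		dst[i] = append([]byte(nil), l...)
	}
	return &trie{refs: 1, layers: dst}
}

// transferTrie moves the layers to the new trie and leaves the source empty.
func (t *trie) transferTrie() *trie {
	moved := t.layers
	t.layers = nil
	return &trie{refs: 1, layers: moved}
}

// pickTrie mirrors the decision in the diff: when the trie is shared, copy the
// expensive-to-regenerate validators trie so the other holder keeps its layers,
// and transfer everything else to avoid the copy cost.
func pickTrie(t *trie, isValidatorsField bool) *trie {
	if t.refs > 1 && isValidatorsField {
		return t.copyTrie()
	}
	if t.refs > 1 {
		return t.transferTrie()
	}
	return t
}

func main() {
	shared := &trie{refs: 2, layers: [][]byte{{1, 2, 3}}}
	fmt.Println(len(pickTrie(shared, true).layers), len(shared.layers))  // 1 1: copy keeps the source intact
	fmt.Println(len(pickTrie(shared, false).layers), len(shared.layers)) // 1 0: transfer empties the source
}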

View File

@@ -17,6 +17,7 @@ go_library(
"trie_helpers.go",
"unrealized_justification.go",
"validator_map_handler.go",
"validator_reader.go",
"validator_root.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil",
@@ -26,6 +27,7 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/multi-value-slice:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
@@ -56,6 +58,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/multi-value-slice:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// UnrealizedCheckpointBalances returns the total current active balance, the
@@ -13,17 +12,21 @@ import (
// current epoch correctly attested for target balance. It takes the current and
// previous epoch participation bits as parameters so implicitly only works for
// beacon states post-Altair.
func UnrealizedCheckpointBalances(cp, pp []byte, validators []*ethpb.Validator, currentEpoch primitives.Epoch) (uint64, uint64, uint64, error) {
func UnrealizedCheckpointBalances(cp, pp []byte, validators ValReader, currentEpoch primitives.Epoch) (uint64, uint64, uint64, error) {
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
activeBalance := uint64(0)
currentTarget := uint64(0)
prevTarget := uint64(0)
if len(cp) < len(validators) || len(pp) < len(validators) {
if len(cp) < validators.Len() || len(pp) < validators.Len() {
return 0, 0, 0, errors.New("participation does not match validator set")
}
var err error
for i, v := range validators {
valLength := validators.Len()
for i := 0; i < valLength; i++ {
v, err := validators.At(i)
if err != nil {
return 0, 0, 0, err
}
active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
if active && !v.Slashed {
activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)

View File

@@ -4,6 +4,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
multi_value_slice "github.com/prysmaticlabs/prysm/v5/container/multi-value-slice"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -25,7 +26,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
pp := make([]byte, len(validators))
t.Run("No one voted last two epochs", func(tt *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 0)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
@@ -35,7 +36,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
t.Run("bad votes in last two epochs", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
@@ -45,7 +46,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
t.Run("two votes in last epoch", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
@@ -55,7 +56,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
t.Run("two votes in previous epoch", func(tt *testing.T) {
copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
@@ -66,7 +67,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MinDepositAmount
require.Equal(tt, expectedActive, active)
@@ -76,7 +77,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
t.Run("slash a validator", func(tt *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
@@ -85,7 +86,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
})
t.Run("Exit a validator", func(tt *testing.T) {
validators[4].ExitEpoch = 1
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 2)
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 2)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
@@ -93,3 +94,105 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
})
}
func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
targetFlag := params.BeaconConfig().TimelyTargetFlagIndex
expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
mv := &multi_value_slice.Slice[*ethpb.Validator]{}
mv.Init(validators)
cp := make([]byte, len(validators))
pp := make([]byte, len(validators))
t.Run("No one voted last two epochs", func(tt *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("bad votes in last two epochs", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in last epoch", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in previous epoch", func(tt *testing.T) {
copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("votes in both epochs, decreased balance in first validator", func(tt *testing.T) {
validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MinDepositAmount
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("slash a validator", func(tt *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("Exit a validator", func(tt *testing.T) {
validators[4].ExitEpoch = 1
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 2)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
})
}
type testObject struct {
id uint64
}
func (o *testObject) Id() uint64 {
return o.id
}
func (o *testObject) SetId(id uint64) {
o.id = id
}


@@ -0,0 +1,59 @@
package stateutil
import (
multi_value_slice "github.com/prysmaticlabs/prysm/v5/container/multi-value-slice"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// ValReader specifies an interface through which we can access the validator registry.
type ValReader interface {
Len() int
At(i int) (*ethpb.Validator, error)
}
// ValSliceReader describes a struct that conforms to the ValReader interface
type ValSliceReader struct {
Validators []*ethpb.Validator
}
// NewValSliceReader constructs a ValSliceReader object.
func NewValSliceReader(vals []*ethpb.Validator) ValSliceReader {
return ValSliceReader{Validators: vals}
}
// Len is the length of the validator registry.
func (v ValSliceReader) Len() int {
return len(v.Validators)
}
// At returns the validator at the provided index.
func (v ValSliceReader) At(i int) (*ethpb.Validator, error) {
return v.Validators[i], nil
}
// ValMultiValueSliceReader describes a struct that conforms to the ValReader interface.
// This struct is specifically designed for accessing validator data from a
// multivalue slice.
type ValMultiValueSliceReader struct {
ValMVSlice *multi_value_slice.Slice[*ethpb.Validator]
Identifier multi_value_slice.Identifiable
}
// NewValMultiValueSliceReader constructs a new val reader object.
func NewValMultiValueSliceReader(valSlice *multi_value_slice.Slice[*ethpb.Validator],
identifier multi_value_slice.Identifiable) ValMultiValueSliceReader {
return ValMultiValueSliceReader{
ValMVSlice: valSlice,
Identifier: identifier,
}
}
// Len is the length of the validator registry.
func (v ValMultiValueSliceReader) Len() int {
return v.ValMVSlice.Len(v.Identifier)
}
// At returns the validator at the provided index.
func (v ValMultiValueSliceReader) At(i int) (*ethpb.Validator, error) {
return v.ValMVSlice.At(v.Identifier, uint64(i))
}
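The ValReader abstraction above lets balance computations such as UnrealizedCheckpointBalances walk the registry through Len/At instead of materializing a full []*ethpb.Validator copy out of the multi-value slice. A minimal, self-contained sketch of the same pattern, using simplified stand-in types rather than the actual Prysm API:

package main

import "fmt"

// Validator is a simplified stand-in for ethpb.Validator.
type Validator struct {
	EffectiveBalance uint64
	Slashed          bool
}

// ValReader mirrors the interface above: index-based access without copying.
type ValReader interface {
	Len() int
	At(i int) (*Validator, error)
}

// SliceReader wraps a plain slice, analogous to ValSliceReader.
type SliceReader struct{ vals []*Validator }

func (r SliceReader) Len() int                     { return len(r.vals) }
func (r SliceReader) At(i int) (*Validator, error) { return r.vals[i], nil }

// activeBalance iterates through the reader, the way the unrealized balance
// computation can now avoid copying the registry out of a multi-value slice.
func activeBalance(v ValReader) (uint64, error) {
	var total uint64
	for i := 0; i < v.Len(); i++ {
		val, err := v.At(i)
		if err != nil {
			return 0, err
		}
		if !val.Slashed {
			total += val.EffectiveBalance
		}
	}
	return total, nil
}

func main() {
	vals := []*Validator{{EffectiveBalance: 32}, {EffectiveBalance: 32, Slashed: true}}
	total, _ := activeBalance(SliceReader{vals: vals})
	fmt.Println(total) // prints 32
}

A second implementation backed by a copy-on-write multi-value slice (as ValMultiValueSliceReader is in the diff) satisfies the same interface, so callers do not change.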


@@ -6,6 +6,7 @@ go_library(
"batch.go",
"batcher.go",
"blobs.go",
"log.go",
"metrics.go",
"pool.go",
"service.go",


@@ -12,7 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)
// ErrChainBroken indicates a backfill batch can't be imported to the db because it is not known to be the ancestor
@@ -73,9 +73,9 @@ type batch struct {
bs *blobSync
}
func (b batch) logFields() log.Fields {
func (b batch) logFields() logrus.Fields {
return map[string]interface{}{
"batch_id": b.id(),
"batchId": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
"seq": b.seq,
@@ -139,7 +139,7 @@ func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
func (b batch) postBlobSync() batch {
if b.blobsNeeded() > 0 {
log.WithFields(b.logFields()).WithField("blobs_missing", b.blobsNeeded()).Error("batch still missing blobs after downloading from peer")
log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
b.bs = nil
b.results = []blocks.ROBlock{}
return b.withState(batchErrRetryable)
@@ -152,15 +152,16 @@ func (b batch) withState(s batchState) batch {
b.scheduled = time.Now()
switch b.state {
case batchErrRetryable:
backfillBatchRetries.Inc()
b.retries += 1
log.WithFields(b.logFields()).Info("sequencing batch for retry")
log.WithFields(b.logFields()).Info("Sequencing batch for retry")
case batchInit, batchNil:
b.firstScheduled = b.scheduled
}
}
if s == batchImportComplete {
backfillBatchTimeRoundtrip.Observe(float64(time.Since(b.firstScheduled).Milliseconds()))
log.WithFields(b.logFields()).Debug("Backfill batch imported.")
log.WithFields(b.logFields()).Debug("Backfill batch imported")
}
b.state = s
b.seq += 1
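The hunks above also apply the logging conventions unified in these commits: structured field keys are camelCase (batchId, blobsMissing), messages start with a capital letter, and trailing periods are dropped. A minimal logrus sketch of the convention; the field values are illustrative:

package main

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "backfill")

func main() {
	// Preferred: camelCase keys, capitalized message, no trailing period.
	log.WithFields(logrus.Fields{
		"batchId": 7,
		"state":   "importable",
	}).Info("Sequencing batch for retry")

	// Avoided by these changes: snake_case keys and sentence punctuation,
	// e.g. log.WithField("batch_id", 7).Info("sequencing batch for retry.")
}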


@@ -112,7 +112,7 @@ func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVer
return m[rb.Index]
}
func (bbv blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
func (bbv *blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
m, ok := bbv.verifiers[blk.Root()]
if !ok {
return nil, errors.Wrapf(verification.ErrMissingVerification, "no record of verifiers for root %#x", blk.Root())
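The one-character change above gives VerifiedROBlobs a pointer receiver, matching newVerifier on the same type. Mixing value and pointer receivers on a stateful type is a common source of subtle bugs; as a general illustration of the hazard (not necessarily the exact failure fixed here), a value copy taken before a pointer-receiver method lazily initializes a field never observes that field:

package main

import "fmt"

type verifierCache struct {
	verifiers map[string]int
}

// add uses a pointer receiver so the lazily allocated map is stored back
// into the caller's value.
func (c *verifierCache) add(root string) {
	if c.verifiers == nil {
		c.verifiers = make(map[string]int)
	}
	c.verifiers[root]++
}

// lookup uses a value receiver and therefore operates on a copy of the
// struct header, including whatever map pointer that copy holds.
func (c verifierCache) lookup(root string) bool {
	_, ok := c.verifiers[root]
	return ok
}

func main() {
	var c verifierCache
	early := c     // copied while verifiers is still nil
	c.add("0xabc") // allocates and populates the map on c only

	fmt.Println(early.lookup("0xabc")) // false: the early copy still holds a nil map
	fmt.Println(c.lookup("0xabc"))     // true
}

Keeping both methods on pointer receivers removes this class of surprise.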


@@ -0,0 +1,5 @@
package backfill
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "backfill")


@@ -57,6 +57,12 @@ var (
Help: "Number of BeaconBlock values downloaded from peers for backfill.",
},
)
backfillBatchRetries = promauto.NewCounter(
prometheus.CounterOpts{
Name: "backfill_batch_retries",
Help: "Number of times batches have failed with a retryable error.",
},
)
backfillBatchTimeRoundtrip = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_roundtrip",


@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
log "github.com/sirupsen/logrus"
)
type batchWorkerPool interface {


@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/proto/dbval"
"github.com/prysmaticlabs/prysm/v5/runtime"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
type Service struct {
@@ -149,12 +148,12 @@ func (s *Service) initVerifier(ctx context.Context) (*verifier, sync.ContextByte
}
keys, err := cps.PublicKeys()
if err != nil {
return nil, nil, errors.Wrap(err, "Unable to retrieve public keys for all validators in the origin state")
return nil, nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
}
vr := cps.GenesisValidatorsRoot()
ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
if err != nil {
return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
}
v, err := newBackfillVerifier(vr, keys)
return v, ctxMap, err
@@ -164,10 +163,10 @@ func (s *Service) updateComplete() bool {
b, err := s.pool.complete()
if err != nil {
if errors.Is(err, errEndSequence) {
log.WithField("backfill_slot", b.begin).Info("Backfill is complete.")
log.WithField("backfillSlot", b.begin).Info("Backfill is complete")
return true
}
log.WithError(err).Error("Backfill service received unhandled error from worker pool.")
log.WithError(err).Error("Backfill service received unhandled error from worker pool")
return true
}
s.batchSeq.update(b)
@@ -187,11 +186,11 @@ func (s *Service) importBatches(ctx context.Context) {
for i := range importable {
ib := importable[i]
if len(ib.results) == 0 {
log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer.")
log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer")
}
_, err := s.batchImporter(ctx, current, ib, s.store)
if err != nil {
log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import.")
log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import")
s.downscore(ib)
s.batchSeq.update(ib.withState(batchErrRetryable))
// If a batch fails, the subsequent batches are no longer considered importable.
@@ -204,8 +203,8 @@ func (s *Service) importBatches(ctx context.Context) {
nt := s.batchSeq.numTodo()
log.WithField("imported", imported).WithField("importable", len(importable)).
WithField("batches_remaining", nt).
Info("Backfill batches processed.")
WithField("batchesRemaining", nt).
Info("Backfill batches processed")
backfillRemainingBatches.Set(float64(nt))
}
@@ -220,7 +219,7 @@ func (s *Service) scheduleTodos() {
// and then we'll have the parent_root expected by 90 to ensure it matches the root for 89,
// at which point we know we can process [80..90).
if errors.Is(err, errMaxBatches) {
log.Debug("Backfill batches waiting for descendent batch to complete.")
log.Debug("Backfill batches waiting for descendent batch to complete")
return
}
}
@@ -232,17 +231,17 @@ func (s *Service) scheduleTodos() {
// Start begins the runloop of backfill.Service in the current goroutine.
func (s *Service) Start() {
if !s.enabled {
log.Info("Backfill service not enabled.")
log.Info("Backfill service not enabled")
return
}
ctx, cancel := context.WithCancel(s.ctx)
defer func() {
log.Info("Backfill service is shutting down.")
log.Info("Backfill service is shutting down")
cancel()
}()
clock, err := s.cw.WaitForClock(ctx)
if err != nil {
log.WithError(err).Error("Backfill service failed to start while waiting for genesis data.")
log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
return
}
s.clock = clock
@@ -250,39 +249,39 @@ func (s *Service) Start() {
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
if err != nil {
log.WithError(err).Error("Could not initialize blob verifier in backfill service.")
log.WithError(err).Error("Could not initialize blob verifier in backfill service")
return
}
if s.store.isGenesisSync() {
log.Info("Backfill short-circuit; node synced from genesis.")
log.Info("Backfill short-circuit; node synced from genesis")
return
}
status := s.store.status()
// Exit early if there aren't going to be any batches to backfill.
if primitives.Slot(status.LowSlot) <= s.ms(s.clock.CurrentSlot()) {
log.WithField("minimum_required_slot", s.ms(s.clock.CurrentSlot())).
WithField("backfill_lowest_slot", status.LowSlot).
Info("Exiting backfill service; minimum block retention slot > lowest backfilled block.")
log.WithField("minimumRequiredSlot", s.ms(s.clock.CurrentSlot())).
WithField("backfillLowestSlot", status.LowSlot).
Info("Exiting backfill service; minimum block retention slot > lowest backfilled block")
return
}
s.verifier, s.ctxMap, err = s.initVerifier(ctx)
if err != nil {
log.WithError(err).Error("Unable to initialize backfill verifier.")
log.WithError(err).Error("Unable to initialize backfill verifier")
return
}
if s.initSyncWaiter != nil {
log.Info("Backfill service waiting for initial-sync to reach head before starting.")
log.Info("Backfill service waiting for initial-sync to reach head before starting")
if err := s.initSyncWaiter(); err != nil {
log.WithError(err).Error("Error waiting for init-sync to complete.")
log.WithError(err).Error("Error waiting for init-sync to complete")
return
}
}
s.pool.spawn(ctx, s.nWorkers, clock, s.pa, s.verifier, s.ctxMap, s.newBlobVerifier, s.blobStore)
s.batchSeq = newBatchSequencer(s.nWorkers, s.ms(s.clock.CurrentSlot()), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
if err = s.initBatches(); err != nil {
log.WithError(err).Error("Non-recoverable error in backfill service.")
log.WithError(err).Error("Non-recoverable error in backfill service")
return
}
@@ -296,7 +295,7 @@ func (s *Service) Start() {
s.importBatches(ctx)
batchesWaiting.Set(float64(s.batchSeq.countWithState(batchImportable)))
if err := s.batchSeq.moveMinimum(s.ms(s.clock.CurrentSlot())); err != nil {
log.WithError(err).Error("Non-recoverable error while adjusting backfill minimum slot.")
log.WithError(err).Error("Non-recoverable error while adjusting backfill minimum slot")
}
s.scheduleTodos()
}


@@ -15,7 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/proto/dbval"
)
var errBatchDisconnected = errors.New("Highest block root in backfill batch doesn't match next parent_root")
var errBatchDisconnected = errors.New("highest block root in backfill batch doesn't match next parent_root")
// NewUpdater correctly initializes a StatusUpdater value with the required database value.
func NewUpdater(ctx context.Context, store BeaconDB) (*Store, error) {


@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
log "github.com/sirupsen/logrus"
)
type workerId int
@@ -31,14 +30,14 @@ func (w *p2pWorker) run(ctx context.Context) {
for {
select {
case b := <-w.todo:
log.WithFields(b.logFields()).WithField("backfill_worker", w.id).Debug("Backfill worker received batch.")
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
if b.state == batchBlobSync {
w.done <- w.handleBlobs(ctx, b)
} else {
w.done <- w.handleBlocks(ctx, b)
}
case <-ctx.Done():
log.WithField("backfill_worker", w.id).Info("Backfill worker exiting after context canceled.")
log.WithField("backfillWorker", w.id).Info("Backfill worker exiting after context canceled")
return
}
}
@@ -73,7 +72,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
bdl += vb[i].SizeSSZ()
}
backfillBlocksApproximateBytes.Add(float64(bdl))
log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("backfill batch block bytes downloaded")
log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("Backfill batch block bytes downloaded")
bs, err := newBlobSync(cs, vb, &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.nbv, store: w.bfs})
if err != nil {
return b.withRetryableError(err)
@@ -97,7 +96,7 @@ func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
// All blobs are the same size, so we can compute 1 and use it for all in the batch.
sz := blobs[0].SizeSSZ() * len(blobs)
backfillBlobsApproximateBytes.Add(float64(sz))
log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("backfill batch blob bytes downloaded")
log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("Backfill batch blob bytes downloaded")
}
return b.postBlobSync()
}


@@ -5,6 +5,7 @@ go_library(
srcs = [
"api.go",
"file.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint",
visibility = ["//visibility:public"],


@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/config/params"
log "github.com/sirupsen/logrus"
)
// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
@@ -31,7 +30,7 @@ func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
origin, err := d.OriginCheckpointBlockRoot(ctx)
if err == nil && origin != params.BeaconConfig().ZeroHash {
log.Warnf("origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
log.Warnf("Origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
return nil
} else {
if !errors.Is(err, db.ErrNotFound) {


@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/io/file"
log "github.com/sirupsen/logrus"
)
// Initializer describes a type that is able to obtain the checkpoint sync data (BeaconState and SignedBeaconBlock)
@@ -45,7 +44,7 @@ type FileInitializer struct {
func (fi *FileInitializer) Initialize(ctx context.Context, d db.Database) error {
origin, err := d.OriginCheckpointBlockRoot(ctx)
if err == nil && origin != params.BeaconConfig().ZeroHash {
log.Warnf("origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
log.Warnf("Origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
return nil
} else {
if !errors.Is(err, db.ErrNotFound) {


@@ -0,0 +1,5 @@
package checkpoint
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "checkpoint-sync")


@@ -45,6 +45,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",


@@ -126,8 +126,9 @@ type fetchRequestResponse struct {
// newBlocksFetcher creates ready to use fetcher.
func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetcher {
blocksPerPeriod := flags.Get().BlockBatchLimit
allowedBlocksBurst := flags.Get().BlockBatchLimitBurstFactor * flags.Get().BlockBatchLimit
blockBatchLimit := maxBatchLimit()
blocksPerPeriod := blockBatchLimit
allowedBlocksBurst := flags.Get().BlockBatchLimitBurstFactor * blockBatchLimit
// Allow fetcher to go almost to the full burst capacity (less a single batch).
rateLimiter := leakybucket.NewCollector(
float64(blocksPerPeriod), int64(allowedBlocksBurst-blocksPerPeriod),
@@ -159,6 +160,27 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
}
}
// This specifies the block batch limit the initial sync fetcher will use. In the event the user has provided
// an excessive number, this is automatically lowered.
func maxBatchLimit() int {
currLimit := flags.Get().BlockBatchLimit
maxLimit := params.BeaconConfig().MaxRequestBlocks
if params.DenebEnabled() {
maxLimit = params.BeaconConfig().MaxRequestBlocksDeneb
}
castedMaxLimit, err := math.Int(maxLimit)
if err != nil {
// Should be impossible to hit this case.
log.WithError(err).Error("Unable to calculate the max batch limit")
return currLimit
}
if currLimit > castedMaxLimit {
log.Warnf("Specified batch size exceeds the block limit of the network, lowering from %d to %d", currLimit, maxLimit)
currLimit = castedMaxLimit
}
return currLimit
}
// start boots up the fetcher, which starts listening for incoming fetch requests.
func (f *blocksFetcher) start() error {
select {
@@ -414,8 +436,8 @@ func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlo
if err != nil {
if errors.Is(err, consensus_types.ErrUnsupportedField) {
log.
WithField("block_slot", block.Slot()).
WithField("retention_start", blobWindowStart).
WithField("blockSlot", block.Slot()).
WithField("retentionStart", blobWindowStart).
Warn("block with slot within blob retention period has version which does not support commitments")
continue
}


@@ -3,6 +3,7 @@ package initialsync
import (
"context"
"fmt"
"math"
"math/rand"
"sort"
"sync"
@@ -1142,3 +1143,26 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
require.Equal(t, 0, len(expectedCommits))
}
func TestBatchLimit(t *testing.T) {
params.SetupTestConfigCleanup(t)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = math.MaxUint64
params.OverrideBeaconConfig(testCfg)
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 640,
BlockBatchLimitBurstFactor: 10,
})
defer func() {
flags.Init(resetFlags)
}()
assert.Equal(t, 640, maxBatchLimit())
testCfg.DenebForkEpoch = 100000
params.OverrideBeaconConfig(testCfg)
assert.Equal(t, params.BeaconConfig().MaxRequestBlocksDeneb, uint64(maxBatchLimit()))
}
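TestBatchLimit above pins the clamping behavior of maxBatchLimit: the configured BlockBatchLimit is kept when it is under the network ceiling and lowered to the ceiling otherwise. With the spec defaults (1024 for MaxRequestBlocks and 128 for MaxRequestBlocksDeneb, treated here as assumed values), a configured limit of 640 survives pre-Deneb but is lowered to 128 once Deneb is scheduled. A tiny standalone sketch of the clamp:

package main

import "fmt"

// clampBatchLimit mirrors maxBatchLimit with plain ints: the configured
// limit is lowered whenever it exceeds the network ceiling.
func clampBatchLimit(configured, networkMax int) int {
	if configured > networkMax {
		return networkMax
	}
	return configured
}

func main() {
	fmt.Println(clampBatchLimit(640, 1024)) // 640: within the pre-Deneb ceiling
	fmt.Println(clampBatchLimit(640, 128))  // 128: lowered under the Deneb ceiling
}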


@@ -5,22 +5,31 @@ package initialsync
import (
"context"
"fmt"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/abool"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
@@ -58,6 +67,7 @@ type Service struct {
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
ctxMap sync.ContextByteVersions
}
// Option is a functional option for the initial-sync Service.
@@ -124,6 +134,13 @@ func (s *Service) Start() {
}
s.clock = clock
log.Info("Received state initialized event")
ctxMap, err := sync.ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
if err != nil {
log.WithField("genesisValidatorRoot", clock.GenesisValidatorsRoot()).
WithError(err).Error("unable to initialize context version map using genesis validator")
return
}
s.ctxMap = ctxMap
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
if err != nil {
@@ -162,7 +179,15 @@ func (s *Service) Start() {
s.markSynced()
return
}
s.waitForMinimumPeers()
peers, err := s.waitForMinimumPeers()
if err != nil {
log.WithError(err).Error("Error waiting for minimum number of peers")
return
}
if err := s.fetchOriginBlobs(peers); err != nil {
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
return
}
if err := s.roundRobinSync(gt); err != nil {
if errors.Is(s.ctx.Err(), context.Canceled) {
return
@@ -215,7 +240,10 @@ func (s *Service) Resync() error {
defer func() { s.synced.Set() }() // Reset it at the end of the method.
genesis := time.Unix(int64(headState.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.waitForMinimumPeers()
_, err = s.waitForMinimumPeers()
if err != nil {
return err
}
if err = s.roundRobinSync(genesis); err != nil {
log = log.WithError(err)
}
@@ -223,16 +251,19 @@ func (s *Service) Resync() error {
return nil
}
func (s *Service) waitForMinimumPeers() {
func (s *Service) waitForMinimumPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
for {
if s.ctx.Err() != nil {
return nil, s.ctx.Err()
}
cp := s.cfg.Chain.FinalizedCheckpt()
_, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, cp.Epoch)
if len(peers) >= required {
break
return peers, nil
}
log.WithFields(logrus.Fields{
"suitable": len(peers),
@@ -247,3 +278,75 @@ func (s *Service) markSynced() {
s.synced.Set()
close(s.cfg.InitialSyncComplete)
}
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}
blk, err := s.cfg.DB.Block(s.ctx, r)
if err != nil {
log.WithField("root", r).Error("Block for checkpoint sync origin root not found in db")
return err
}
if blk.Version() < version.Deneb {
return nil
}
cmts, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
log.WithField("root", r).Error("Error reading commitments from checkpoint sync origin block")
return err
}
if len(cmts) == 0 {
return nil
}
rob, err := blocks.NewROBlockWithRoot(blk, r)
if err != nil {
return err
}
onDisk, err := s.cfg.BlobStorage.Indices(r)
if err != nil {
return errors.Wrapf(err, "error checking existing blobs for checkpoint sync bloc root %#x", r)
}
req := make(p2ptypes.BlobSidecarsByRootReq, 0, len(cmts))
for i := range cmts {
if onDisk[i] {
continue
}
req = append(req, &eth.BlobIdentifier{BlockRoot: r[:], Index: uint64(i)})
}
if len(req) == 0 {
log.WithField("nBlobs", len(cmts)).WithField("root", fmt.Sprintf("%#x", r)).Debug("All checkpoint block blobs are present")
return nil
}
shufflePeers(pids)
for i := range pids {
sidecars, err := sync.SendBlobSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req)
if err != nil {
continue
}
if len(sidecars) != len(req) {
continue
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
if err := avs.Persist(current, sidecars...); err != nil {
return err
}
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
continue
}
log.WithField("nBlobs", len(sidecars)).WithField("root", fmt.Sprintf("%#x", r)).Info("Successfully downloaded blobs for checkpoint sync block")
return nil
}
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}
func shufflePeers(pids []peer.ID) {
rg := rand.NewGenerator()
rg.Shuffle(len(pids), func(i, j int) {
pids[i], pids[j] = pids[j], pids[i]
})
}
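fetchOriginBlobs above asks peers in a random order and accepts the first peer whose sidecars cover the whole request and pass verification, falling through to the next peer on any failure. The same shuffle-and-try pattern in isolation, with simplified types rather than the Prysm API:

package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// fetchFromAny shuffles the candidate peers and returns the first successful
// result, mirroring the retry structure of fetchOriginBlobs.
func fetchFromAny(peers []string, fetch func(peer string) ([]byte, error)) ([]byte, error) {
	rand.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] })
	for _, p := range peers {
		data, err := fetch(p)
		if err != nil {
			continue // any per-peer failure just moves on to the next peer
		}
		return data, nil
	}
	return nil, errors.New("no peer could serve the request")
}

func main() {
	peers := []string{"peerA", "peerB", "peerC"}
	data, err := fetchFromAny(peers, func(p string) ([]byte, error) {
		if p == "peerB" {
			return []byte("sidecars"), nil
		}
		return nil, errors.New("unavailable")
	})
	fmt.Println(string(data), err)
}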


@@ -99,7 +99,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
// Skip blocks that are already being processed.
if s.cfg.chain.BlockBeingSynced(blkRoot) {
log.WithField("BlockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
log.WithField("blockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
continue
}


@@ -210,5 +210,5 @@ func (l *limiter) retrieveCollector(topic string) (*leakybucket.Collector, error
}
func (_ *limiter) topicLogger(topic string) *logrus.Entry {
return log.WithField("rate limiter", topic)
return log.WithField("rateLimiter", topic)
}


@@ -128,7 +128,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
defer func() {
if r := recover(); r != nil {
log.WithField("error", r).
WithField("recovered_at", "registerRPC").
WithField("recoveredAt", "registerRPC").
WithField("stack", string(debug.Stack())).
Error("Panic occurred")
}


@@ -106,7 +106,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb
}
defer closeStream(stream, log)
log := log.WithField("Reason", goodbyeMessage(code))
log := log.WithField("reason", goodbyeMessage(code))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Sending Goodbye message to peer")
// Wait up to the response timeout for the peer to receive the goodbye

Some files were not shown because too many files have changed in this diff Show More