Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: remote-key ... 22aa87019d (52 commits)
Commits (SHA1):

22aa87019d
0db74365e0
6f90101364
49e1763ec2
c2527c82cd
d4ea8fafd6
07d1d6bdf9
f938da99d9
9deec69cc7
2767f08f4d
d46c620783
dd05e44ef3
9da36a5de6
7950a24926
ea51253be9
2ac30f5ce6
7418c00ad6
66342655fd
18eca953c1
8191bb5711
d4613aee0c
9fcc1a7a77
75dea214ac
4374e709cb
be300f80bd
096cba5b2d
d5127233e4
3d35cc20ec
1e658530a7
b360794c9c
0fc9ab925a
dda5ee3334
14c67376c3
9c8b68a66d
a3210157e2
1536d59e30
11e46a4560
5a2e51b894
d20ec4c7a1
7a70abbd15
a2b84c9320
edef17e41d
85c5d31b5b
fa056c2d21
61de11e2c4
2773bdef89
2a23dc7f4a
f97622b054
08d0f42725
74c8a25354
a466c6db9c
4da6c4291f
.github/PULL_REQUEST_TEMPLATE.md (vendored): 3 changes
@@ -34,4 +34,5 @@ Fixes #

- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
- [ ] I have added a description with sufficient context for reviewers to understand this PR.
- [ ] I have tested that my changes work as expected and I added a testing plan to the PR description (if applicable).
@@ -193,6 +193,7 @@ nogo(

    "//tools/analyzers/featureconfig:go_default_library",
    "//tools/analyzers/gocognit:go_default_library",
    "//tools/analyzers/ineffassign:go_default_library",
    "//tools/analyzers/httpwriter:go_default_library",
    "//tools/analyzers/interfacechecker:go_default_library",
    "//tools/analyzers/logcapitalization:go_default_library",
    "//tools/analyzers/logruswitherror:go_default_library",
CHANGELOG.md: 85 changes
@@ -4,6 +4,91 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.0](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.1.0) - 2025-12-10

This release includes several key features and fixes. If you are running v7.0.0, you should update to v7.0.1 or later and remove the flag `--disable-last-epoch-targets`.

Release highlights:

- Backfill is now supported in Fulu. Backfill from checkpoint sync now supports data columns. Run with `--enable-backfill` when using checkpoint sync.
- A new node configuration to custody enough data columns to reconstruct blobs. Use the `--semi-supernode` flag to custody at least 50% of the data columns.
- Critical fixes in attestation processing.

A post-mortem doc with full details on the mainnet attestation processing issue from December 4th is expected in the coming days.
### Added

- Add Fulu support to light client processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15995)
- Record data column gossip KZG batch verification latency in both the pooled worker and fallback paths so the `beacon_kzg_verification_data_column_batch_milliseconds` histogram reflects gossip traffic, annotated with `path` labels to distinguish the sources. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16018)
- Implement Gloas state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15611)
- Add initial configs for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add kv functions for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add supported version for fork versions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16030)
- Prometheus metric `gossip_attestation_verification_milliseconds` to track attestation gossip topic validation latency (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15785)
- Integrate state-diff into `State()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16033)
- Implement Gloas fork support in consensus-types/blocks with factory methods, getters, setters, and proto handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15618)
- Integrate state-diff into `HasState()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16045)
- Added `--semi-supernode` flag to custody half of a supernode's data column requirements while still allowing reconstruction for blob retrieval. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16029)
- Data column backfill. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Backfill metrics for columns: `backfill_data_column_sidecar_downloaded`, `backfill_data_column_sidecar_downloaded_bytes`, `backfill_batch_columns_download_ms`, `backfill_batch_columns_verify_ms`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16099)
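The two latency metric entries above follow the usual Prometheus pattern of observing the elapsed time around the validation call. The sketch below shows that pattern with the standard Prometheus Go client; the metric name is taken from the entry above, but the `promauto` registration and the `validateAttestation` function are illustrative assumptions, not Prysm's actual wiring.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// gossipAttestationVerificationLatency reuses the metric name from the changelog
// entry above; registration details are illustrative only.
var gossipAttestationVerificationLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "gossip_attestation_verification_milliseconds",
	Help: "Time taken to validate an attestation received on the gossip topic.",
})

// validateAttestation is a hypothetical stand-in for the gossip topic validator.
func validateAttestation(data []byte) error {
	start := time.Now()
	defer func() {
		// Observe the elapsed wall time in milliseconds once validation finishes.
		gossipAttestationVerificationLatency.Observe(float64(time.Since(start).Milliseconds()))
	}()
	_ = data // real validation work would happen here
	return nil
}

func main() {
	_ = validateAttestation([]byte{0x01})
}
```

A histogram with a `path` label, as in the KZG batch verification entry, follows the same pattern using `promauto.NewHistogramVec`.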
### Changed

- Improve readability in slashing import and remove duplicated code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15957)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Changed `--subscribe-all-data-subnets` flag to `--supernode` and aliased `--subscribe-all-data-subnets` for existing users (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16012)
- Use explicit slot component timing configs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15999)
- Downgraded the PrepareBeaconProposer updated fee recipients log from INFO to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Changed the `Updated fee recipients` logging to log only the count of validators at DEBUG level and all validator indices at TRACE level. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Stop emitting payload attribute events during late block handling when we are not proposing the next slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16026)
- Initialize the `ExecutionRequests` field in the gossip block map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16047)
- Avoid redundant WithHttpEndpoint when a JWT is provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16032)
- Removed the dead slot parameter from `blobCacheEntry.filter`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16021)
- Added log prefix to the `genesis` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Added log prefix to the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- `WithGenesisValidatorsRoot`: Use camelCase for the log field param. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Moved the `Origin checkpoint found in db` log from WARN to INFO, since it is the expected behaviour. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Backfill metrics that changed name and/or histogram buckets: backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_time_waiting -> backfill_batch_waiting_ms, backfill_batch_time_roundtrip -> backfill_batch_roundtrip_ms, backfill_blocks_bytes_downloaded -> backfill_blocks_downloaded_bytes, backfill_batch_blocks_time_download -> backfill_batch_blocks_download_ms, backfill_batch_blobs_time_download -> backfill_batch_blobs_download_ms, backfill_blobs_bytes_downloaded -> backfill_blocks_downloaded_bytes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Moved the "Not enough connected peers" log (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16097)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; the default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)
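The `--supernode` rename above keeps the old flag spelling working through an alias. A minimal sketch of that aliasing pattern with urfave/cli v2 follows; the app wiring and usage text are illustrative assumptions, not Prysm's actual flag registration.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "beacon-node-flags-demo",
		Flags: []cli.Flag{
			// New canonical name, with the old spelling kept as an alias so
			// existing configurations keep working.
			&cli.BoolFlag{
				Name:    "supernode",
				Aliases: []string{"subscribe-all-data-subnets"},
				Usage:   "Subscribe to all data column subnets",
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Println("supernode enabled:", c.Bool("supernode"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

With this, passing either `--supernode` or `--subscribe-all-data-subnets` sets the same value.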
### Removed

- Remove validator cross-client from end-to-end tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16025)
- `NUMBER_OF_COLUMNS` configuration (not in the specification any more, replaced by a preset). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
- `MAX_CELLS_IN_EXTENDED_MATRIX` configuration (not in the specification any more). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
### Fixed

- Nil check for the block in fetchOriginSidecars if it doesn't exist in the DB. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16006)
- Fix proposals progress bar count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16020)
- Move the `BlockGossipReceived` event to the end of gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16031)
- Fix state-diff repetitive anchor slot bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16037)
- Check that the JWT secret length is exactly 256 bits (32 bytes), as per the Engine API specification (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15939)
- `http_error_count` now matches the other cases by listing the endpoint name rather than the actual URL requested, which keeps metric cardinality low. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16055)
- Fix array out of bounds in the static analyzer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16058)
- Fixes E2E tests to be able to start from the Electra genesis fork or future forks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16048)
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
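A standalone sketch of the JWT secret length check described above: it assumes the secret is supplied as a hex string (optionally `0x`-prefixed); the function name and error messages are placeholders rather than Prysm's implementation.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// validateJWTSecret checks that a hex-encoded JWT secret decodes to exactly
// 256 bits (32 bytes), as the Engine API specification requires.
func validateJWTSecret(hexSecret string) ([]byte, error) {
	trimmed := strings.TrimPrefix(strings.TrimSpace(hexSecret), "0x")
	secret, err := hex.DecodeString(trimmed)
	if err != nil {
		return nil, fmt.Errorf("jwt secret is not valid hex: %w", err)
	}
	if len(secret) != 32 {
		return nil, fmt.Errorf("jwt secret must be exactly 32 bytes, got %d", len(secret))
	}
	return secret, nil
}

func main() {
	if _, err := validateJWTSecret("0x" + strings.Repeat("ab", 32)); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("secret length OK")
}
```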
## [v7.0.1](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.0.1) - 2025-12-08

This patch release contains 4 cherry-picked changes to address the mainnet attestation processing issue from 2025-12-04. Operators are encouraged to update to this release as soon as practical. As of this release, the feature flag `--disable-last-epoch-targets` has been deprecated and can be safely removed from your node configuration.

A post-mortem doc with full details is expected to be published later this week.

### Changed

- Moved the "Not enough connected peers" log (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; the default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)

### Fixed

- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)

## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10

This is our initial mainnet release for the Ethereum mainnet Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or a later release prior to the Fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
var ok bool
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(st.Slot())
|
||||
if e == stateEpoch {
|
||||
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
|
||||
if e == stateEpoch || fuluAndNextEpoch {
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
st = st.Copy()
|
||||
if slot > st.Slot() {
|
||||
// At this point either we know we are proposing on a future slot or we need to still compute the
|
||||
// right proposer index pre-Fulu, either way we need to copy the state to process it.
|
||||
st = st.Copy()
|
||||
var err error
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
|
||||
if err != nil {
|
||||
@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
if e > stateEpoch {
|
||||
if e > stateEpoch && !fuluAndNextEpoch {
|
||||
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
|
||||
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
|
||||
require.Equal(t, vhs[0].String(), vh0)
|
||||
require.Equal(t, vhs[1].String(), vh1)
|
||||
}
|
||||
|
||||
func TestComputePayloadAttribute(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
proposingSlot: slot,
|
||||
headRoot: [32]byte{},
|
||||
}
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -53,58 +54,53 @@ type fcuConfig struct {
|
||||
}
|
||||
|
||||
// sendFCU handles the logic to notify the engine of a forkchoice update
|
||||
// for the first time when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions when the
|
||||
// incoming block is late, preparing payload attributes in this case while it
|
||||
// only sends a message with empty attributes for early blocks.
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if !s.isNewHead(cfg.headRoot) {
|
||||
return nil
|
||||
// when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions.
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
if cfg.postState.Version() < version.Fulu {
|
||||
// update the caches to compute the right proposer index
|
||||
// this function is called under a forkchoice lock which we need to release.
|
||||
s.ForkChoicer().Unlock()
|
||||
s.updateCachesPostBlockProcessing(cfg)
|
||||
s.ForkChoicer().Lock()
|
||||
}
|
||||
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not get forkchoice update argument")
|
||||
return
|
||||
}
|
||||
// If head has not been updated and attributes are nil, we can skip the FCU.
|
||||
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
|
||||
return
|
||||
}
|
||||
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
|
||||
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
|
||||
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
|
||||
// to the engine if needed
|
||||
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
cfg.ctx = slotCtx
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not compute payload attributes")
|
||||
return
|
||||
}
|
||||
if fcuArgs.attributes.IsEmpty() {
|
||||
return
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
|
||||
if s.isNewHead(fcuArgs.headRoot) {
|
||||
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
}
|
||||
|
||||
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
|
||||
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
|
||||
// The caller of this function should NOT have a lock in forkchoice store.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
|
||||
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
|
||||
defer span.End()
|
||||
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
|
||||
ctx = trace.NewContext(s.ctx, span)
|
||||
s.ForkChoicer().Lock()
|
||||
defer s.ForkChoicer().Unlock()
|
||||
_, err := s.notifyForkchoiceUpdate(ctx, args)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify forkchoice update")
|
||||
log.WithError(err).Error("Could not notify forkchoice update")
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
|
||||
// Only need to prune attestations from pool if the head has changed.
|
||||
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldOverrideFCU checks whether the incoming block is still subject to being
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||
headBlock: wsb,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
|
||||
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
|
||||
require.Equal(t, true, has)
|
||||
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
|
||||
headRoot: r,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
}
|
||||
|
||||
func TestShouldOverrideFCU(t *testing.T) {
|
||||
|
||||
@@ -22,10 +22,7 @@ import (
|
||||
// The caller of this function must have a lock on forkchoice.
|
||||
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
|
||||
headEpoch := slots.ToEpoch(s.HeadSlot())
|
||||
if c.Epoch < headEpoch {
|
||||
return nil
|
||||
}
|
||||
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
|
||||
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
|
||||
return nil
|
||||
}
|
||||
// Only use head state if the head state is compatible with the target checkpoint.
|
||||
@@ -33,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
|
||||
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
|
||||
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
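// For example (illustrative note, not part of the original change): with headEpoch = 10, a checkpoint
// from the previous epoch has c.Epoch = 9 = headEpoch - 1, while a checkpoint from the current
// epoch has c.Epoch = 10, so headEpoch - 1 = c.Epoch - 1.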
|
||||
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
|
||||
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -46,14 +45,18 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
|
||||
}
|
||||
|
||||
// If the head state alone is enough, we can return it directly read only.
|
||||
if c.Epoch == headEpoch {
|
||||
if c.Epoch <= headEpoch {
|
||||
st, err := s.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return st
|
||||
}
|
||||
// Otherwise we need to advance the head state to the start of the target epoch.
|
||||
// At this point we can only have c.Epoch > headEpoch.
|
||||
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
|
||||
return nil
|
||||
}
|
||||
// Advance the head state to the start of the target epoch.
|
||||
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
|
||||
slot, err := slots.EpochStart(c.Epoch)
|
||||
if err != nil {
|
||||
|
||||
@@ -170,17 +170,141 @@ func TestService_GetRecentPreState(t *testing.T) {
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 31,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 1, Root: ckRoot}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'T'},
|
||||
block: headBlock,
|
||||
slot: 64,
|
||||
state: s,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 30 <-- 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'U'},
|
||||
block: headBlock,
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
startTime := time.Now()
|
||||
fcuArgs := &fcuConfig{}
|
||||
|
||||
if s.inRegularSync() {
|
||||
defer s.handleSecondFCUCall(cfg, fcuArgs)
|
||||
}
|
||||
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
|
||||
defer s.processLightClientUpdates(cfg)
|
||||
}
|
||||
@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
|
||||
return nil
|
||||
}
|
||||
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not get forkchoice update argument")
|
||||
return nil
|
||||
}
|
||||
if err := s.sendFCU(cfg, fcuArgs); err != nil {
|
||||
return errors.Wrap(err, "could not send FCU to engine")
|
||||
}
|
||||
s.sendFCU(cfg, fcuArgs)
|
||||
|
||||
// Pre-Fulu the caches are updated when computing the payload attributes
|
||||
if cfg.postState.Version() >= version.Fulu {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
cfg.ctx = ctx
|
||||
s.updateCachesPostBlockProcessing(cfg)
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -295,18 +295,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
arg := &fcuConfig{
|
||||
headState: preState,
|
||||
headRoot: lastBR,
|
||||
headBlock: lastB,
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
@@ -330,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
|
||||
return nil
|
||||
}
|
||||
|
||||
// the caller of this function must not hold a lock in forkchoice store.
|
||||
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
e := coreTime.CurrentEpoch(st)
|
||||
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
|
||||
@@ -359,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
if e > 0 {
|
||||
e = e - 1
|
||||
}
|
||||
s.ForkChoicer().RLock()
|
||||
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
|
||||
s.ForkChoicer().RUnlock()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
return nil
|
||||
@@ -372,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
}
|
||||
|
||||
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
|
||||
// caches.
|
||||
// caches. The caller of this function must not hold a lock in forkchoice store.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
@@ -634,9 +629,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
if uint64(len(expected)) > numberOfColumns {
|
||||
if len(expected) > fieldparams.NumberOfColumns {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
@@ -818,10 +811,9 @@ func (s *Service) areDataColumnsAvailable(
|
||||
|
||||
case <-ctx.Done():
|
||||
var missingIndices any = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missing))
|
||||
missingIndicesCount := len(missing)
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
if missingIndicesCount < fieldparams.NumberOfColumns {
|
||||
missingIndices = helpers.SortedPrettySliceFromMap(missing)
|
||||
}
|
||||
|
||||
@@ -915,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if currentSlot == s.HeadSlot() {
|
||||
return
|
||||
}
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
// return early if we are in init sync
|
||||
if !s.inRegularSync() {
|
||||
return
|
||||
@@ -929,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
// Before Fulu we need to process the next slot to find out if we are proposing.
|
||||
if lastState.Version() < version.Fulu {
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
} else {
|
||||
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
|
||||
defer func() {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
@@ -966,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
headBlock: headBlock,
|
||||
attributes: attribute,
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
|
||||
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
|
||||
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
|
||||
return err
|
||||
}
|
||||
if !s.inRegularSync() {
|
||||
return nil
|
||||
}
|
||||
slot := cfg.roblock.Block().Slot()
|
||||
if slots.WithinVotingWindow(s.genesisTime, slot) {
|
||||
return nil
|
||||
}
|
||||
return s.computePayloadAttributes(cfg, fcuArgs)
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
|
||||
|
||||
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
|
||||
// boundary in order to compute the right proposer indices after processing
|
||||
// state transition. This function is called on late blocks while still locked,
|
||||
// before sending FCU to the engine.
|
||||
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
|
||||
// state transition. The caller of this function must not hold a lock in forkchoice store.
|
||||
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
|
||||
slot := cfg.postState.Slot()
|
||||
root := cfg.roblock.Root()
|
||||
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
|
||||
return errors.Wrap(err, "could not update next slot state cache")
|
||||
log.WithError(err).Error("Could not update next slot state cache")
|
||||
return
|
||||
}
|
||||
if !slots.IsEpochEnd(slot) {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
|
||||
}
|
||||
|
||||
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
|
||||
// This is useful when proposing in the next block and we want to defer the
|
||||
// computation of the next slot shuffling.
|
||||
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
|
||||
go s.sendFCUWithAttributes(cfg, fcuArgs)
|
||||
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
|
||||
log.WithError(err).Error("Could not handle epoch boundary")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
|
||||
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}
|
||||
|
||||
// computePayloadAttributes modifies the passed FCU arguments to
|
||||
// contain the right payload attributes with the tracked proposer. It gets
|
||||
// called on blocks that arrive after the attestation voting window, or in a
|
||||
// background routine after syncing early blocks.
|
||||
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if cfg.roblock.Root() == cfg.headRoot {
|
||||
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
|
||||
@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -816,25 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
blk.Signature = []byte{'a'} // Mutate the signature.
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -866,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1339,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1351,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1363,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1375,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1400,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
// INVALID from FCU, with LVH block 17. No head is viable. We check
|
||||
// that the node is optimistic and that we can actually import a block on top of
|
||||
// 17 and recover.
|
||||
func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
|
||||
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), jc.Epoch)
|
||||
|
||||
// import a block that justifies the second epoch
|
||||
driftGenesisTime(service, 18, 0)
|
||||
validHeadState, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
firstInvalidRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
|
||||
sjc := validHeadState.CurrentJustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 19, 0)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing), and the node is optimistic.
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// import another block based on the last valid head state
|
||||
mockEngine = &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 20, 0)
|
||||
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, jc.Epoch, sjc.Epoch)
|
||||
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
@@ -1642,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1662,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
@@ -1684,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1708,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1718,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isBlock18OptimisticAfterImport)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
|
||||
@@ -1768,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1835,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1856,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1877,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1906,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1975,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -2000,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -2028,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -2072,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2089,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -2109,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -2130,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -2161,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2282,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2348,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2495,7 +2336,8 @@ func TestMissingBlobIndices(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingDataColumnIndices(t *testing.T) {
|
||||
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
|
||||
const countPlusOne = fieldparams.NumberOfColumns + 1
|
||||
|
||||
tooManyColumns := make(map[uint64]bool, countPlusOne)
|
||||
for i := range countPlusOne {
|
||||
tooManyColumns[uint64(i)] = true
|
||||
@@ -2630,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2731,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2765,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -3261,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
@@ -69,7 +69,7 @@ type SlashingReceiver interface {
|
||||
// 1. Validate block, apply state transition and update checkpoints
|
||||
// 2. Apply fork choice to the processed block
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
// Return early if the block is blacklisted
|
||||
@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
return postState, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
|
||||
var err error
|
||||
start := time.Now()
|
||||
if avs != nil {
|
||||
@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
|
||||
// UpdateCustodyInfoInDB updates the custody information in the database.
|
||||
// It returns the (potentially updated) custody group count and the earliest available slot.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
|
||||
isSupernode := flags.Get().Supernode
|
||||
isSemiSupernode := flags.Get().SemiSupernode
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
custodyRequirement := cfg.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
|
||||
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update subscription status to all data subnets")
|
||||
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
|
||||
}
|
||||
|
||||
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
|
||||
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
|
||||
log.Warnf(
|
||||
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
|
||||
flags.SubscribeAllDataSubnets.Name,
|
||||
)
|
||||
// Compute the target custody group count based on current flag configuration.
|
||||
targetCustodyGroupCount := custodyRequirement
|
||||
|
||||
// Supernode: custody all groups (either currently set or previously enabled)
|
||||
if isSupernode {
|
||||
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := custodyRequirement
|
||||
if isSubscribedToAllDataSubnets {
|
||||
custodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
|
||||
if isSemiSupernode {
|
||||
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "minimum custody group count")
|
||||
}
|
||||
|
||||
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
|
||||
}
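// Illustrative arithmetic (example values, not taken from the surrounding config):
// MinimumCustodyGroupCountToReconstruct returns the smallest group count whose
// columns cover at least half of all data columns, which is what reconstruction
// requires. With a hypothetical config of 128 custody groups, that minimum is 64,
// so a semi-supernode whose validator-driven custody requirement is 4 would
// target max(4, 64) = 64 custody groups.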
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
|
||||
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
if isSupernode {
|
||||
log.WithFields(logrus.Fields{
|
||||
"current": actualCustodyGroupCount,
|
||||
"target": cfg.NumberOfCustodyGroups,
|
||||
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
|
||||
}
|
||||
|
||||
if wasSupernode && !isSupernode {
|
||||
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a supernode.", flags.Supernode.Name)
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, actualCustodyGroupCount, nil
|
||||
}
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
|
||||
@@ -603,7 +603,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
custodyRequirement = uint64(4)
|
||||
earliestStoredSlot = primitives.Slot(12)
|
||||
numberOfCustodyGroups = uint64(64)
|
||||
numberOfColumns = uint64(128)
|
||||
)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
@@ -611,7 +610,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
cfg.CustodyRequirement = custodyRequirement
|
||||
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
|
||||
cfg.NumberOfColumns = numberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
@@ -642,7 +640,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -680,7 +678,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
// ----------
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -695,4 +693,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("Supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.Supernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should still be supernode
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the configured custody groups
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the groups
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.SemiSupernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Unchanged due to downgrade prevention by UpdateCustodyInfo
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start with semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the configured custody groups
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the groups
|
||||
|
||||
// Upgrade to full supernode
|
||||
gFlags.SemiSupernode = false
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should upgrade to full supernode
|
||||
upgradeSlot := slot + 2
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to the full custody group count
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Mock a high custody requirement (simulating many validators)
|
||||
// We need to override the custody requirement calculation
|
||||
// For this test, we'll verify the logic by checking if custodyRequirement > 64
|
||||
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
|
||||
// This would require a different test setup with actual validators
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the configured custody groups
// With low validator requirements (4), should use the semi-supernode minimum
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
|
||||
if s.ReceiveBlockMockErr != nil {
|
||||
return s.ReceiveBlockMockErr
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/gloas:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -75,7 +77,12 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
|
||||
beaconState, err = gloas.UpdatePendingPaymentWeight(beaconState, att, indices, participatedFlags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance, att)
|
||||
}
|
||||
|
||||
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
|
||||
@@ -105,7 +112,9 @@ func SetParticipationAndRewardProposer(
|
||||
beaconState state.BeaconState,
|
||||
targetEpoch primitives.Epoch,
|
||||
indices []uint64,
|
||||
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
|
||||
participatedFlags map[uint8]bool,
|
||||
totalBalance uint64,
|
||||
att ethpb.Att) (state.BeaconState, error) {
|
||||
var proposerRewardNumerator uint64
|
||||
currentEpoch := time.CurrentEpoch(beaconState)
|
||||
var stateErr error
|
||||
@@ -299,6 +308,40 @@ func AttestationParticipationFlagIndices(beaconState state.ReadOnlyBeaconState,
|
||||
participatedFlags[targetFlagIndex] = true
|
||||
}
|
||||
matchedSrcTgtHead := matchedHead && matchedSrcTgt
|
||||
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// if is_attestation_same_slot(state, data):
|
||||
// assert data.index == 0
|
||||
// payload_matches = True
|
||||
// else:
|
||||
// slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT
|
||||
// payload_index = state.execution_payload_availability[slot_index]
|
||||
// payload_matches = data.index == payload_index
|
||||
//
|
||||
// # [Modified in Gloas:EIP7732]
|
||||
// is_matching_head = is_matching_target and head_root_matches and payload_matches
|
||||
var matchingPayload bool
|
||||
if beaconState.Version() >= version.Gloas {
|
||||
ok, err := gloas.SameSlotAttestation(beaconState, [32]byte(data.BeaconBlockRoot), data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
if data.CommitteeIndex != 0 {
|
||||
return nil, fmt.Errorf("committee index %d for same slot attestation must be 0", data.CommitteeIndex)
|
||||
}
|
||||
matchingPayload = true
|
||||
} else {
|
||||
executionPayloadAvail, err := beaconState.ExecutionPayloadAvailability(data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matchingPayload = executionPayloadAvail == uint64(data.CommitteeIndex)
|
||||
}
|
||||
matchedSrcTgtHead = matchedSrcTgtHead && matchingPayload
|
||||
}
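// Worked example of the check above (illustrative values): for an attestation with
// data.Slot = 3 whose BeaconBlockRoot equals the state's block root at slot 3 but
// differs from the root at slot 2, SameSlotAttestation is true, so the payload is
// treated as matching and CommitteeIndex must be 0. Otherwise the payload only
// matches when CommitteeIndex equals the execution payload availability value
// recorded for slot 3.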
|
||||
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
|
||||
participatedFlags[headFlagIndex] = true
|
||||
}
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
@@ -556,7 +558,7 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {
|
||||
|
||||
b, err := helpers.TotalActiveBalance(beaconState)
|
||||
require.NoError(t, err)
|
||||
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
|
||||
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b, &ethpb.Attestation{})
|
||||
require.NoError(t, err)
|
||||
|
||||
i, err := helpers.BeaconProposerIndex(t.Context(), st)
|
||||
@@ -775,11 +777,67 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
|
||||
headFlagIndex: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gloas same-slot committee index non-zero errors",
|
||||
inputState: func() state.BeaconState {
|
||||
stateSlot := primitives.Slot(5)
|
||||
slot := primitives.Slot(3)
|
||||
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
|
||||
headRoot := bytes.Repeat([]byte{0xBB}, 32)
|
||||
prevRoot := bytes.Repeat([]byte{0xCC}, 32)
|
||||
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, prevRoot, 0, 0)
|
||||
}(),
|
||||
inputData: &ethpb.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1, // invalid for same-slot
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytes.Repeat([]byte{0xAA}, 32),
|
||||
},
|
||||
},
|
||||
inputDelay: 1,
|
||||
participationIndices: nil,
|
||||
},
|
||||
{
|
||||
name: "gloas payload availability matches committee index",
|
||||
inputState: func() state.BeaconState {
|
||||
stateSlot := primitives.Slot(5)
|
||||
slot := primitives.Slot(3)
|
||||
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
|
||||
headRoot := bytes.Repeat([]byte{0xBB}, 32)
|
||||
// Same prev root to make SameSlotAttestation false and use payload availability.
|
||||
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, headRoot, 1, slot)
|
||||
}(),
|
||||
inputData: &ethpb.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytes.Repeat([]byte{0xAA}, 32),
|
||||
},
|
||||
},
|
||||
inputDelay: 1,
|
||||
participationIndices: map[uint8]bool{
|
||||
sourceFlagIndex: true,
|
||||
targetFlagIndex: true,
|
||||
headFlagIndex: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
|
||||
if test.participationIndices == nil {
|
||||
require.ErrorContains(t, "committee index", err)
|
||||
continue
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, test.participationIndices, flagIndices)
|
||||
if !reflect.DeepEqual(test.participationIndices, flagIndices) {
|
||||
t.Fatalf("unexpected participation indices: got %v want %v", flagIndices, test.participationIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -858,3 +916,61 @@ func TestMatchingStatus(t *testing.T) {
|
||||
require.Equal(t, test.matchedHead, head)
|
||||
}
|
||||
}
|
||||
|
||||
func buildGloasStateForFlags(t *testing.T, stateSlot, slot primitives.Slot, targetRoot, headRoot, prevRoot []byte, availabilityBit uint8, availabilitySlot primitives.Slot) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
blockRoots[0] = targetRoot
|
||||
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = headRoot
|
||||
blockRoots[(slot-1)%cfg.SlotsPerHistoricalRoot] = prevRoot
|
||||
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
|
||||
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
idx := availabilitySlot % cfg.SlotsPerHistoricalRoot
|
||||
byteIndex := idx / 8
|
||||
bitIndex := idx % 8
|
||||
if availabilityBit == 1 {
|
||||
execPayloadAvailability[byteIndex] |= 1 << bitIndex
|
||||
}
|
||||
|
||||
checkpointRoot := bytes.Repeat([]byte{0xDD}, fieldparams.RootLength)
|
||||
justified := &ethpb.Checkpoint{Root: checkpointRoot}
|
||||
|
||||
stProto := &ethpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: execPayloadAvailability,
|
||||
CurrentJustifiedCheckpoint: justified,
|
||||
PreviousJustifiedCheckpoint: justified,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
EffectiveBalance: cfg.MinActivationBalance,
|
||||
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{cfg.MinActivationBalance},
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return beaconState
|
||||
}
|
||||
|
||||
@@ -111,7 +111,16 @@ func VerifyAttestationNoVerifySignature(
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
if att.Version() >= version.Electra {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// # [Modified in Gloas:EIP7732]
|
||||
// assert data.index < 2
|
||||
//
|
||||
if beaconState.Version() >= version.Gloas {
|
||||
if att.GetData().CommitteeIndex >= 2 {
|
||||
return fmt.Errorf("incorrect committee index %d", att.GetData().CommitteeIndex)
|
||||
}
|
||||
} else if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
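// Note (illustrative): under the Gloas rule above an attestation may carry
// CommitteeIndex 0 or 1, mirroring the payload-availability comparison in
// AttestationParticipationFlagIndices; any value >= 2 is rejected here.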
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
@@ -314,6 +315,75 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_GloasCommitteeIndexLimit(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
stateSlot := cfg.MinAttestationInclusionDelay + 1
|
||||
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
|
||||
checkpointRoot := bytes.Repeat([]byte{0xAA}, fieldparams.RootLength)
|
||||
justified := &ethpb.Checkpoint{Epoch: 0, Root: checkpointRoot}
|
||||
|
||||
gloasStateProto := &ethpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
|
||||
CurrentJustifiedCheckpoint: justified,
|
||||
PreviousJustifiedCheckpoint: justified,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
EffectiveBalance: cfg.MinActivationBalance,
|
||||
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{cfg.MinActivationBalance},
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoGloas(gloasStateProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
aggBits := bitfield.NewBitlist(1)
|
||||
aggBits.SetBitAt(0, true)
|
||||
|
||||
att := &ethpb.AttestationElectra{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 0,
|
||||
CommitteeIndex: 2, // invalid for Gloas (must be <2)
|
||||
BeaconBlockRoot: blockRoots[0],
|
||||
Source: justified,
|
||||
Target: justified,
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
Signature: bytes.Repeat([]byte{0x00}, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "incorrect committee index 2", err)
|
||||
}
|
||||
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
@@ -60,7 +60,7 @@ func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb
|
||||
voteCount := uint64(0)
|
||||
|
||||
for _, vote := range beaconState.Eth1DataVotes() {
|
||||
if AreEth1DataEqual(vote, data.Copy()) {
|
||||
if AreEth1DataEqual(vote, data) {
|
||||
voteCount++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if uint64(curEpoch) < e {
|
||||
continue
|
||||
}
|
||||
bal, err := st.PendingBalanceToWithdraw(srcIdx)
|
||||
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if bal > 0 {
|
||||
if hasBal {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
31
beacon-chain/core/gloas/BUILD.bazel
Normal file
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["attestation.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["attestation_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
144
beacon-chain/core/gloas/attestation.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// SameSlotAttestation checks if the attestation is for the same slot as the block root in the state.
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// is_attestation_same_slot(state, data):
|
||||
// block_root_at_slot = get_block_root_at_slot(state, data.slot)
|
||||
// block_root_at_prev_slot = get_block_root_at_slot(state, data.slot - 1)
|
||||
// return block_root_at_slot == data.beacon_block_root and block_root_at_prev_slot != data.beacon_block_root
|
||||
func SameSlotAttestation(state state.ReadOnlyBeaconState, blockRoot [32]byte, slot primitives.Slot) (bool, error) {
|
||||
if slot == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
blockRootAtSlot, err := helpers.BlockRootAtSlot(state, slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
matchingBlockRoot := bytes.Equal(blockRoot[:], blockRootAtSlot)
|
||||
|
||||
blockRootAtPrevSlot, err := helpers.BlockRootAtSlot(state, slot-1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
matchingPrevBlockRoot := bytes.Equal(blockRoot[:], blockRootAtPrevSlot)
|
||||
|
||||
return matchingBlockRoot && !matchingPrevBlockRoot, nil
|
||||
}
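// Usage sketch (hypothetical caller, for illustration only): given an attestation's
// data, the helper distinguishes a vote for the block proposed at data.Slot from a
// vote that carries forward an earlier block root.
//
//	same, err := SameSlotAttestation(st, [32]byte(data.BeaconBlockRoot), data.Slot)
//	if err != nil {
//		return err
//	}
//	if same {
//		// Per the spec excerpt above, data.index must be 0 in this case.
//	}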
|
||||
|
||||
// UpdatePendingPaymentWeight updates the builder pending payment weight based on attestation participation.
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// if data.target.epoch == get_current_epoch(state):
|
||||
// epoch_participation = state.current_epoch_participation
|
||||
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH]
|
||||
// else:
|
||||
// epoch_participation = state.previous_epoch_participation
|
||||
// payment = state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH]
|
||||
//
|
||||
// for index in get_attesting_indices(state, attestation):
|
||||
// will_set_new_flag = False
|
||||
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
|
||||
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
|
||||
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||
// proposer_reward_numerator += get_base_reward(state, index) * weight
|
||||
// will_set_new_flag = True
|
||||
// if will_set_new_flag and is_attestation_same_slot(state, data) and payment.withdrawal.amount > 0:
|
||||
// payment.weight += state.validators[index].effective_balance
|
||||
//
|
||||
// if current_epoch_target:
|
||||
// state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] = payment
|
||||
// else:
|
||||
// state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] = payment
|
||||
func UpdatePendingPaymentWeight(beaconState state.BeaconState, att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) (state.BeaconState, error) {
|
||||
if beaconState.Version() < version.Gloas {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
data := att.GetData()
|
||||
epoch := slots.ToEpoch(beaconState.Slot())
|
||||
|
||||
isSameSlot, err := SameSlotAttestation(beaconState, [32]byte(data.BeaconBlockRoot), data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !isSameSlot {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
var (
|
||||
paymentSlot primitives.Slot
|
||||
payment *ethpb.BuilderPendingPayment
|
||||
epochParticipation []byte
|
||||
)
|
||||
if data.Target.Epoch == epoch {
|
||||
paymentSlot = slotsPerEpoch + (data.Slot % slotsPerEpoch)
|
||||
payment, err = beaconState.BuilderPendingPayment(uint64(paymentSlot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochParticipation, err = beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
paymentSlot = data.Slot % slotsPerEpoch
|
||||
payment, err = beaconState.BuilderPendingPayment(uint64(paymentSlot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochParticipation, err = beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if payment.Withdrawal.Amount == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
flagIndices := []uint8{cfg.TimelySourceFlagIndex, cfg.TimelyTargetFlagIndex, cfg.TimelyHeadFlagIndex}
|
||||
hasNewFlag := func(idx uint64) bool {
|
||||
participation := epochParticipation[idx]
|
||||
for _, f := range flagIndices {
|
||||
if !participatedFlags[f] {
|
||||
continue
|
||||
}
|
||||
if participation&(1<<f) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for _, idx := range indices {
|
||||
if !hasNewFlag(idx) {
|
||||
continue
|
||||
}
|
||||
validator, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payment.Weight += primitives.Gwei(validator.EffectiveBalance)
|
||||
}
|
||||
|
||||
if err := beaconState.SetBuilderPendingPayment(uint64(paymentSlot), payment); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return beaconState, nil
|
||||
}
|
||||
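The spec pseudocode above spreads builder pending payments across a buffer of length 2 * SLOTS_PER_EPOCH: attestations targeting the current epoch land in the second half, previous-epoch targets in the first half. A minimal illustrative sketch of that indexing (not part of the diff, assuming SLOTS_PER_EPOCH = 32):

package main

import "fmt"

// paymentIndex mirrors the buffer indexing used by UpdatePendingPaymentWeight:
// the builder_pending_payments vector has 2*slotsPerEpoch entries, with the
// second half reserved for current-epoch targets.
func paymentIndex(slot, slotsPerEpoch uint64, currentEpochTarget bool) uint64 {
	if currentEpochTarget {
		return slotsPerEpoch + slot%slotsPerEpoch
	}
	return slot % slotsPerEpoch
}

func main() {
	fmt.Println(paymentIndex(4, 32, true))  // 36: current-epoch bucket
	fmt.Println(paymentIndex(4, 32, false)) // 4: previous-epoch bucket
}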
beacon-chain/core/gloas/attestation_test.go (new file, 288 lines)
@@ -0,0 +1,288 @@
package gloas

import (
	"bytes"
	"testing"

	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

func buildStateWithBlockRoots(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
	t.Helper()

	cfg := params.BeaconConfig()
	blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	for slot, root := range roots {
		blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
	}

	stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	for i := range stateRoots {
		stateRoots[i] = bytes.Repeat([]byte{0x11}, 32)
	}

	randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
	for i := range randaoMixes {
		randaoMixes[i] = bytes.Repeat([]byte{0x22}, 32)
	}

	execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)

	stProto := &ethpb.BeaconStateGloas{
		Slot: stateSlot,
		GenesisValidatorsRoot: bytes.Repeat([]byte{0x33}, 32),
		Fork: &ethpb.Fork{
			CurrentVersion: bytes.Repeat([]byte{0x44}, 4),
			PreviousVersion: bytes.Repeat([]byte{0x44}, 4),
			Epoch: 0,
		},
		BlockRoots: blockRoots,
		StateRoots: stateRoots,
		RandaoMixes: randaoMixes,
		ExecutionPayloadAvailability: execPayloadAvailability,
		Validators: []*ethpb.Validator{},
		Balances: []uint64{},
		BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
		BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
	}

	state, err := state_native.InitializeFromProtoGloas(stProto)
	require.NoError(t, err)
	return state.(*state_native.BeaconState)
}

func TestSameSlotAttestation(t *testing.T) {
	rootA := bytes.Repeat([]byte{0xAA}, 32)
	rootB := bytes.Repeat([]byte{0xBB}, 32)
	rootC := bytes.Repeat([]byte{0xCC}, 32)

	tests := []struct {
		name string
		stateSlot primitives.Slot
		slot primitives.Slot
		blockRoot []byte
		roots map[primitives.Slot][]byte
		want bool
	}{
		{
			name: "slot zero always true",
			stateSlot: 1,
			slot: 0,
			blockRoot: rootA,
			roots: map[primitives.Slot][]byte{},
			want: true,
		},
		{
			name: "matching current different previous",
			stateSlot: 6,
			slot: 4,
			blockRoot: rootA,
			roots: map[primitives.Slot][]byte{
				4: rootA,
				3: rootB,
			},
			want: true,
		},
		{
			name: "matching current same previous",
			stateSlot: 6,
			slot: 4,
			blockRoot: rootA,
			roots: map[primitives.Slot][]byte{
				4: rootA,
				3: rootA,
			},
			want: false,
		},
		{
			name: "non matching current",
			stateSlot: 6,
			slot: 4,
			blockRoot: rootC,
			roots: map[primitives.Slot][]byte{
				4: rootA,
				3: rootB,
			},
			want: false,
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			state := buildStateWithBlockRoots(t, tt.stateSlot, tt.roots)
			var rootArr [32]byte
			copy(rootArr[:], tt.blockRoot)

			got, err := SameSlotAttestation(state, rootArr, tt.slot)
			require.NoError(t, err)
			require.Equal(t, tt.want, got)
		})
	}
}

func buildStateForPaymentTest(t *testing.T, stateSlot primitives.Slot, paymentIdx int, amount primitives.Gwei, weight primitives.Gwei, roots map[primitives.Slot][]byte) *state_native.BeaconState {
	t.Helper()

	cfg := params.BeaconConfig()
	blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	for slot, root := range roots {
		blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
	}

	stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	for i := range stateRoots {
		stateRoots[i] = bytes.Repeat([]byte{0x44}, 32)
	}
	randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
	for i := range randaoMixes {
		randaoMixes[i] = bytes.Repeat([]byte{0x55}, 32)
	}

	validator := &ethpb.Validator{
		PublicKey: bytes.Repeat([]byte{0x01}, 48),
		WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x02}, 31)...),
		EffectiveBalance: cfg.MinActivationBalance,
	}

	payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
	for i := range payments {
		payments[i] = &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		}
	}
	payments[paymentIdx] = &ethpb.BuilderPendingPayment{
		Weight: weight,
		Withdrawal: &ethpb.BuilderPendingWithdrawal{
			FeeRecipient: make([]byte, 20),
			Amount: amount,
		},
	}

	execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)

	stProto := &ethpb.BeaconStateGloas{
		Slot: stateSlot,
		GenesisValidatorsRoot: bytes.Repeat([]byte{0x33}, 32),
		BlockRoots: blockRoots,
		StateRoots: stateRoots,
		RandaoMixes: randaoMixes,
		ExecutionPayloadAvailability: execPayloadAvailability,
		Validators: []*ethpb.Validator{validator},
		Balances: []uint64{cfg.MinActivationBalance},
		CurrentEpochParticipation: []byte{0},
		PreviousEpochParticipation: []byte{0},
		BuilderPendingPayments: payments,
		Fork: &ethpb.Fork{
			CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
			PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
			Epoch: 0,
		},
	}

	state, err := state_native.InitializeFromProtoGloas(stProto)
	require.NoError(t, err)
	return state.(*state_native.BeaconState)
}

func TestUpdatePendingPaymentWeight(t *testing.T) {
	cfg := params.BeaconConfig()
	slotsPerEpoch := cfg.SlotsPerEpoch
	slot := primitives.Slot(4)
	stateSlot := slot + 1
	currentEpoch := slots.ToEpoch(stateSlot)

	rootA := bytes.Repeat([]byte{0xAA}, 32)
	rootB := bytes.Repeat([]byte{0xBB}, 32)

	tests := []struct {
		name string
		targetEpoch primitives.Epoch
		blockRoot []byte
		initialAmount primitives.Gwei
		initialWeight primitives.Gwei
		wantWeight primitives.Gwei
	}{
		{
			name: "same slot current epoch adds weight",
			targetEpoch: currentEpoch,
			blockRoot: rootA,
			initialAmount: 10,
			initialWeight: 0,
			wantWeight: primitives.Gwei(cfg.MinActivationBalance),
		},
		{
			name: "same slot zero amount no weight change",
			targetEpoch: currentEpoch,
			blockRoot: rootA,
			initialAmount: 0,
			initialWeight: 5,
			wantWeight: 5,
		},
		{
			name: "non matching block root no change",
			targetEpoch: currentEpoch,
			blockRoot: rootB,
			initialAmount: 10,
			initialWeight: 7,
			wantWeight: 7,
		},
		{
			name: "previous epoch target uses earlier slot",
			targetEpoch: currentEpoch - 1,
			blockRoot: rootA,
			initialAmount: 20,
			initialWeight: 0,
			wantWeight: primitives.Gwei(cfg.MinActivationBalance),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			var paymentIdx int
			if tt.targetEpoch == currentEpoch {
				paymentIdx = int(slotsPerEpoch + (slot % slotsPerEpoch))
			} else {
				paymentIdx = int(slot % slotsPerEpoch)
			}
			state := buildStateForPaymentTest(t, stateSlot, paymentIdx, tt.initialAmount, tt.initialWeight, map[primitives.Slot][]byte{
				slot: tt.blockRoot,
				slot - 1: rootB,
			})

			att := &ethpb.Attestation{
				Data: &ethpb.AttestationData{
					Slot: slot,
					CommitteeIndex: 0,
					BeaconBlockRoot: tt.blockRoot,
					Source: &ethpb.Checkpoint{},
					Target: &ethpb.Checkpoint{
						Epoch: tt.targetEpoch,
					},
				},
			}

			participatedFlags := map[uint8]bool{
				cfg.TimelySourceFlagIndex: true,
				cfg.TimelyTargetFlagIndex: true,
				cfg.TimelyHeadFlagIndex: true,
			}
			indices := []uint64{0}

			gotState, err := UpdatePendingPaymentWeight(state, att, indices, participatedFlags)
			require.NoError(t, err)

			payment, err := gotState.BuilderPendingPayment(uint64(paymentIdx))
			require.NoError(t, err)
			require.Equal(t, tt.wantWeight, payment.Weight)
		})
	}
}
@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
}

if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
log.WithError(err).Error("Could not update committee cache")
}

return indices, nil
@@ -45,12 +45,13 @@ go_test(
"p2p_interface_test.go",
"reconstruction_helpers_test.go",
"reconstruction_test.go",
"semi_supernode_test.go",
"utils_test.go",
"validator_test.go",
"verification_test.go",
],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
@@ -5,6 +5,7 @@ import (
"math"
"slices"

fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -96,8 +97,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
return nil, ErrCustodyGroupTooLarge
}

numberOfColumns := cfg.NumberOfColumns

numberOfColumns := uint64(fieldparams.NumberOfColumns)
columnsPerGroup := numberOfColumns / numberOfCustodyGroups

columns := make([]uint64, 0, columnsPerGroup)
@@ -112,8 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
const numberOfColumns = fieldparams.NumberOfColumns

cfg := params.BeaconConfig()
numberOfColumns := cfg.NumberOfColumns
numberOfCustodyGroups := cfg.NumberOfCustodyGroups

if columnIndex >= numberOfColumns {
@@ -30,7 +30,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.NumberOfColumns = 128
config.NumberOfCustodyGroups = 64
params.OverrideBeaconConfig(config)
@@ -2,6 +2,7 @@ package peerdas

import (
"encoding/binary"
"maps"
"sync"

"github.com/ethereum/go-ethereum/p2p/enode"
@@ -107,3 +108,102 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

// ColumnIndices represents as a set of ColumnIndices. This could be the set of indices that a node is required to custody,
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
type ColumnIndices map[uint64]struct{}

// Has returns true if the index is present in the ColumnIndices.
func (ci ColumnIndices) Has(index uint64) bool {
	_, ok := ci[index]
	return ok
}

// Count returns the number of indices present in the ColumnIndices.
func (ci ColumnIndices) Count() int {
	return len(ci)
}

// Set sets the index in the ColumnIndices.
func (ci ColumnIndices) Set(index uint64) {
	ci[index] = struct{}{}
}

// Unset removes the index from the ColumnIndices.
func (ci ColumnIndices) Unset(index uint64) {
	delete(ci, index)
}

// Copy creates a copy of the ColumnIndices.
func (ci ColumnIndices) Copy() ColumnIndices {
	newCi := make(ColumnIndices, len(ci))
	maps.Copy(newCi, ci)
	return newCi
}

// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
	result := make(ColumnIndices)
	for index := range ci {
		if other.Has(index) {
			result.Set(index)
		}
	}
	return result
}

// Merge mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Merge(other ColumnIndices) {
	for index := range other {
		ci.Set(index)
	}
}

// ToMap converts a ColumnIndices into a map[uint64]struct{}.
// In the future ColumnIndices may be changed to a bit map, so using
// ToMap will ensure forwards-compatibility.
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
	return ci.Copy()
}

// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
	indices := make([]uint64, 0, len(ci))
	for index := range ci {
		indices = append(indices, index)
	}
	return indices
}

// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
// will allow us to move ColumnIndices underlying type to a bitmap in the future and avoid
// lots of loops for things like intersections/unions or copies.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for index, set := range indices {
		if !set {
			continue
		}
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndices creates an empty ColumnIndices.
// In the future ColumnIndices may change from a reference type to a value type,
// so using this constructor will ensure forwards-compatibility.
func NewColumnIndices() ColumnIndices {
	return make(ColumnIndices)
}
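The ColumnIndices helpers above form a small set API. A short usage sketch (illustrative only, not part of the diff; the index values are made up):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
)

func main() {
	custody := peerdas.NewColumnIndicesFromSlice([]uint64{0, 1, 2, 3})
	onDisk := peerdas.NewColumnIndicesFromSlice([]uint64{2, 3, 9})

	// Columns we custody that are already stored.
	stored := custody.Intersection(onDisk)
	fmt.Println(stored.Count()) // 2

	// Mutate custody so it also contains everything in onDisk.
	custody.Merge(onDisk)
	fmt.Println(custody.Has(9)) // true

	// Convert back to a slice (order is not guaranteed for a map-backed set).
	fmt.Println(len(custody.ToSlice())) // 5
}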
@@ -25,3 +25,10 @@ func TestInfo(t *testing.T) {
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
}
}

func TestNewColumnIndicesFromMap(t *testing.T) {
	t.Run("nil map", func(t *testing.T) {
		ci := peerdas.NewColumnIndicesFromMap(nil)
		require.Equal(t, 0, ci.Count())
	})
}
@@ -5,10 +5,20 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
var (
dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)

cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "cells_and_proofs_from_structured_computation_milliseconds",
Help: "Captures the time taken to compute cells and proofs from structured computation.",
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
},
)
)
@@ -33,8 +33,7 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// The sidecar index must be within the valid range.
numberOfColumns := params.BeaconConfig().NumberOfColumns
if sidecar.Index >= numberOfColumns {
if sidecar.Index >= fieldparams.NumberOfColumns {
return ErrIndexTooLarge
}

@@ -281,8 +281,11 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
}

func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
const blobCount = 12
numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
const (
blobCount = 12
numberOfColumns = fieldparams.NumberOfColumns
)

err := kzg.Start()
require.NoError(b, err)
@@ -3,6 +3,7 @@ package peerdas

import (
"sort"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -26,7 +27,40 @@ var (
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (params.BeaconConfig().NumberOfColumns + 1) / 2
return (fieldparams.NumberOfColumns + 1) / 2
}

// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
// custody enough data columns for reconstruction. This accounts for the relationship between
// custody groups and columns, making it future-proof if these values change.
// Returns an error if the configuration values are invalid (zero or would cause division by zero).
func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
	const numberOfColumns = fieldparams.NumberOfColumns
	cfg := params.BeaconConfig()

	// Validate configuration values
	if numberOfColumns == 0 {
		return 0, errors.New("NumberOfColumns cannot be zero")
	}
	if cfg.NumberOfCustodyGroups == 0 {
		return 0, errors.New("NumberOfCustodyGroups cannot be zero")
	}

	minimumColumnCount := MinimumColumnCountToReconstruct()

	// Calculate how many columns each custody group represents
	columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups

	// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
	// for reconstruction purposes as we cannot determine a meaningful custody group count
	if columnsPerGroup == 0 {
		return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
			cfg.NumberOfCustodyGroups, numberOfColumns)
	}

	// Use ceiling division to ensure we have enough groups to cover the minimum columns
	// ceiling(a/b) = (a + b - 1) / b
	return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
}
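Worked example of the ceiling division above, using the same values as the tests later in this diff: with 128 columns and 128 custody groups, columnsPerGroup = 1 and the minimum is ceil(64/1) = 64 groups; with 64 groups, columnsPerGroup = 2 and ceil(64/2) = 32; with 32 groups, columnsPerGroup = 4 and ceil(64/4) = 16.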
// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
@@ -253,7 +287,8 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []

// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
const numberOfColumns = fieldparams.NumberOfColumns

blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))

@@ -262,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return nil, nil, ErrBlobsCellsProofsMismatch
}

cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, blobCount)
proofsPerBlob := make([][]kzg.Proof, blobCount)

for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

proofs = append(proofs, kzgProof)
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
proofs := make([]kzg.Proof, 0, numberOfColumns)
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = proofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
@@ -295,42 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
}()

cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))

for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
}

var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

kzgProofs = append(kzgProofs, kzgProof)
}
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return errors.New("wrong KZG proof size - should never happen")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
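Both functions above switch from appending results inside the loop to writing into pre-sized slices indexed by the loop counter, so the per-blob work can be fanned out with errgroup without a mutex and without scrambling blob order. A stripped-down sketch of that pattern (illustrative only, not part of the diff):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	inputs := []int{1, 2, 3, 4}
	// Pre-size the output: goroutine i is the only writer of results[i].
	results := make([]int, len(inputs))

	var wg errgroup.Group
	for i, in := range inputs {
		// Requires Go 1.22+ loop-variable semantics (each iteration gets fresh i and in).
		wg.Go(func() error {
			results[i] = in * in
			return nil
		})
	}
	if err := wg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results) // [1 4 9 16], order preserved
}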
@@ -17,41 +17,9 @@ import (
)

func TestMinimumColumnsCountToReconstruct(t *testing.T) {
testCases := []struct {
name string
numberOfColumns uint64
expected uint64
}{
{
name: "numberOfColumns=128",
numberOfColumns: 128,
expected: 64,
},
{
name: "numberOfColumns=129",
numberOfColumns: 129,
expected: 65,
},
{
name: "numberOfColumns=130",
numberOfColumns: 130,
expected: 65,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Set the total number of columns.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.NumberOfColumns = tc.numberOfColumns
params.OverrideBeaconConfig(cfg)

// Compute the minimum number of columns needed to reconstruct.
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, tc.expected, actual)
})
}
const expected = uint64(64)
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, expected, actual)
}

func TestReconstructDataColumnSidecars(t *testing.T) {
@@ -200,7 +168,6 @@ func TestReconstructBlobSidecars(t *testing.T) {

t.Run("nominal", func(t *testing.T) {
const blobCount = 3
numberOfColumns := params.BeaconConfig().NumberOfColumns

roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

@@ -236,7 +203,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
require.NoError(t, err)

// Flatten proofs.
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
for _, proofs := range inputProofsPerBlob {
for _, proof := range proofs {
cellProofs = append(cellProofs, proof[:])
@@ -428,13 +395,12 @@ func TestReconstructBlobs(t *testing.T) {
}

func TestComputeCellsAndProofsFromFlat(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

t.Run("mismatched blob and proof counts", func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Create one blob but proofs for two blobs
blobs := [][]byte{{}}

@@ -447,7 +413,6 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {

t.Run("nominal", func(t *testing.T) {
const blobCount = 2
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
beacon-chain/core/peerdas/semi_supernode_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package peerdas

import (
	"testing"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func TestSemiSupernodeCustody(t *testing.T) {
	const numberOfColumns = fieldparams.NumberOfColumns

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.NumberOfCustodyGroups = 128
	params.OverrideBeaconConfig(cfg)

	// Create a test node ID
	nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})

	t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
		// Semi-supernode uses 64 custody groups (half of 128)
		const semiSupernodeCustodyGroupCount = 64

		// Get custody groups for semi-supernode
		custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
		require.NoError(t, err)
		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))

		// Verify we get exactly 64 custody columns
		custodyColumns, err := CustodyColumns(custodyGroups)
		require.NoError(t, err)
		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))

		// Verify the columns are valid (within 0-127 range)
		for columnIndex := range custodyColumns {
			if columnIndex >= numberOfColumns {
				t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, numberOfColumns)
			}
		}
	})

	t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
		minimumCount := MinimumColumnCountToReconstruct()
		require.Equal(t, uint64(64), minimumCount)
	})

	t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
		// Semi-supernode (64 custody groups)
		semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
		require.NoError(t, err)
		semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
		require.NoError(t, err)

		// Supernode (128 custody groups = all groups)
		supernodeGroups, err := CustodyGroups(nodeID, 128)
		require.NoError(t, err)
		supernodeColumns, err := CustodyColumns(supernodeGroups)
		require.NoError(t, err)

		// Verify semi-supernode has exactly half the columns of supernode
		require.Equal(t, 64, len(semiSupernodeColumns))
		require.Equal(t, 128, len(supernodeColumns))
		require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))

		// Verify all semi-supernode columns are a subset of supernode columns
		for columnIndex := range semiSupernodeColumns {
			if !supernodeColumns[columnIndex] {
				t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
			}
		}
	})
}

func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
	tests := []struct {
		name string
		numberOfGroups uint64
		expectedResult uint64
	}{
		{
			name: "Standard 1:1 ratio (128 columns, 128 groups)",
			numberOfGroups: 128,
			expectedResult: 64, // Need half of 128 groups
		},
		{
			name: "2 columns per group (128 columns, 64 groups)",
			numberOfGroups: 64,
			expectedResult: 32, // Need 64 columns, which is 32 groups (64/2)
		},
		{
			name: "4 columns per group (128 columns, 32 groups)",
			numberOfGroups: 32,
			expectedResult: 16, // Need 64 columns, which is 16 groups (64/4)
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig()
			cfg.NumberOfCustodyGroups = tt.numberOfGroups
			params.OverrideBeaconConfig(cfg)

			result, err := MinimumCustodyGroupCountToReconstruct()
			require.NoError(t, err)
			require.Equal(t, tt.expectedResult, result)
		})
	}
}

func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
	t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.NumberOfCustodyGroups = 0
		params.OverrideBeaconConfig(cfg)

		_, err := MinimumCustodyGroupCountToReconstruct()
		require.NotNil(t, err)
		require.Equal(t, true, err.Error() == "NumberOfCustodyGroups cannot be zero")
	})

	t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.NumberOfCustodyGroups = 256
		params.OverrideBeaconConfig(cfg)

		_, err := MinimumCustodyGroupCountToReconstruct()
		require.NotNil(t, err)
		// Just check that we got an error about the configuration
		require.Equal(t, true, len(err.Error()) > 0)
	})
}
@@ -102,11 +102,13 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
const numberOfColumns = uint64(fieldparams.NumberOfColumns)

if len(cellsPerBlob) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, params.BeaconConfig().NumberOfColumns)
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
@@ -115,9 +117,8 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return nil, errors.Wrap(err, "extract block info")
}

maxIdx := params.BeaconConfig().NumberOfColumns
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
for idx := range maxIdx {
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],

@@ -6,7 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -59,6 +59,8 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
}

func TestDataColumnSidecars(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns

t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
@@ -69,10 +71,10 @@ func TestDataColumnSidecars(t *testing.T) {

// Create cells and proofs.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
make([]kzg.Proof, numberOfColumns),
}

rob, err := blocks.NewROBlock(signedBeaconBlock)
@@ -117,7 +119,6 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
}
@@ -149,7 +150,6 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
@@ -197,6 +197,7 @@ func TestDataColumnSidecars(t *testing.T) {
}

func TestReconstructionSource(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)
@@ -212,7 +213,6 @@ func TestReconstructionSource(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}

sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(

// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
set.Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
}

func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
@@ -4,14 +4,19 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"bisect.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
"log.go",
"mock.go",
"needs.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -21,6 +26,7 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -30,11 +36,14 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
"needs_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -45,6 +54,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -11,9 +11,8 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)

var (
@@ -24,12 +23,13 @@ var (
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreBlob struct {
store *filesystem.BlobStorage
cache *blobCache
verifier BlobBatchVerifier
store *filesystem.BlobStorage
cache *blobCache
verifier BlobBatchVerifier
shouldRetain RetentionChecker
}

var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}

// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
@@ -42,11 +42,12 @@ type BlobBatchVerifier interface {

// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStoreBlob {
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier, shouldRetain RetentionChecker) *LazilyPersistentStoreBlob {
return &LazilyPersistentStoreBlob{
store: store,
cache: newBlobCache(),
verifier: verifier,
store: store,
cache: newBlobCache(),
verifier: verifier,
shouldRetain: shouldRetain,
}
}

@@ -66,9 +67,6 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromSidecar(sidecars[0])
entry := s.cache.ensure(key)
for _, blobSidecar := range sidecars {
@@ -81,8 +79,17 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
blockCommitments, err := commitmentsToCheck(b, current)
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
for _, b := range blks {
if err := s.checkOne(ctx, current, b); err != nil {
return err
}
}
return nil
}

func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
blockCommitments, err := commitmentsToCheck(b, s.shouldRetain)
if err != nil {
return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
}
@@ -100,7 +107,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
sidecars, err := entry.filter(root, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete BlobSidecar batch")
}
@@ -112,7 +119,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
ok := errors.As(err, &me)
if ok {
fails := me.Failures()
lf := make(log.Fields, len(fails))
lf := make(logrus.Fields, len(fails))
for i := range fails {
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
}
@@ -131,13 +138,12 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
return nil
}

func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) ([][]byte, error) {
func commitmentsToCheck(b blocks.ROBlock, shouldRetain RetentionChecker) ([][]byte, error) {
if b.Version() < version.Deneb {
return nil, nil
}

// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
if !shouldRetain(b.Block().Slot()) {
return nil, nil
}
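In this hunk the `current primitives.Slot` argument to commitmentsToCheck is replaced by an injected RetentionChecker, so the DA-window policy is decided by the caller instead of being recomputed here (the tests below simply pass testShouldRetainAlways). A hedged sketch of what a production checker could look like, reusing the WithinDAPeriod call this code previously made; the surrounding variables (currentSlot, blobStorage, batchVerifier) are assumed to be in scope and are illustrative only:

// Illustrative only: a RetentionChecker closed over the node's current slot.
shouldRetain := func(blockSlot primitives.Slot) bool {
	return params.WithinDAPeriod(slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot))
}
store := NewLazilyPersistentStore(blobStorage, batchVerifier, shouldRetain)
_ = store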
@@ -17,6 +17,10 @@ import (
errors "github.com/pkg/errors"
)

func testShouldRetainAlways(s primitives.Slot) bool {
	return true
}

func Test_commitmentsToCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
@@ -30,11 +34,12 @@ func Test_commitmentsToCheck(t *testing.T) {
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
cases := []struct {
name string
commits [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
err error
name string
commits [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
err error
shouldRetain RetentionChecker
}{
{
name: "pre deneb",
@@ -60,6 +65,7 @@ func Test_commitmentsToCheck(t *testing.T) {
require.NoError(t, err)
return rb
},
shouldRetain: testShouldRetainAlways,
commits: func() [][]byte {
mb := params.GetNetworkScheduleEntry(slots.ToEpoch(fulu + 100)).MaxBlobsPerBlock
return commits[:mb]
@@ -79,7 +85,8 @@ func Test_commitmentsToCheck(t *testing.T) {
require.NoError(t, err)
return rb
},
slot: fulu + windowSlots + 1,
shouldRetain: func(s primitives.Slot) bool { return false },
slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
@@ -97,14 +104,15 @@ func Test_commitmentsToCheck(t *testing.T) {
require.Equal(t, true, len(c) > params.BeaconConfig().MaxBlobsPerBlock(sb.Block().Slot()))
return rb
},
slot: windowSlots + 1,
err: errIndexOutOfBounds,
shouldRetain: testShouldRetainAlways,
slot: windowSlots + 1,
err: errIndexOutOfBounds,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
b := c.block(t)
co, err := commitmentsToCheck(b, c.slot)
co, err := commitmentsToCheck(b, c.shouldRetain)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
@@ -126,7 +134,7 @@ func TestLazilyPersistent_Missing(t *testing.T) {
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[2]))
@@ -153,7 +161,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[0]))
@@ -166,11 +174,11 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{}, testShouldRetainAlways)
// stashes as expected
require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(ds, blobSidecars...), errDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
@@ -183,7 +191,7 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
require.NoError(t, as.Persist(ds, moreBlobSidecars[1:]...))
}

type mockBlobBatchVerifier struct {
beacon-chain/das/availability_columns.go (new file, 244 lines)
@@ -0,0 +1,244 @@
package das

import (
	"context"
	"io"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store *filesystem.DataColumnStorage
	cache *dataColumnCache
	newDataColumnsVerifier verification.NewDataColumnsVerifier
	custody *custodyRequirement
	bisector Bisector
	shouldRetain RetentionChecker
}

var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
	store *filesystem.DataColumnStorage,
	newDataColumnsVerifier verification.NewDataColumnsVerifier,
	nodeID enode.ID,
	cgc uint64,
	bisector Bisector,
	shouldRetain RetentionChecker,
) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store: store,
		cache: newDataColumnCache(),
		newDataColumnsVerifier: newDataColumnsVerifier,
		custody: &custodyRequirement{nodeID: nodeID, cgc: cgc},
		bisector: bisector,
		shouldRetain: shouldRetain,
	}
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(_ primitives.Slot, sidecars ...blocks.RODataColumn) error {
	for _, sidecar := range sidecars {
		if err := s.cache.stash(sidecar); err != nil {
			return errors.Wrap(err, "stash DataColumnSidecar")
		}
	}

	return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, _ primitives.Slot, blks ...blocks.ROBlock) error {
	toVerify := make([]blocks.RODataColumn, 0)
	for _, block := range blks {
		indices, err := s.required(block)
		if err != nil {
			return errors.Wrapf(err, "full commitments to check with block root `%#x`", block.Root())
		}
		if indices.Count() == 0 {
			continue
		}

		key := keyFromBlock(block)
		entry := s.cache.entry(key)
		toVerify, err = entry.append(toVerify, IndicesNotStored(s.store.Summary(block.Root()), indices))
		if err != nil {
			return errors.Wrap(err, "entry filter")
		}
	}

	if err := s.verifyAndSave(toVerify); err != nil {
		log.Warn("Batch verification failed, bisecting columns by peer")
		if err := s.bisectVerification(toVerify); err != nil {
			return errors.Wrap(err, "bisect verification")
		}
	}

	s.cache.cleanup(blks)
	return nil
}

// required returns the set of column indices to check for a given block.
func (s *LazilyPersistentStoreColumn) required(block blocks.ROBlock) (peerdas.ColumnIndices, error) {
	if !s.shouldRetain(block.Block().Slot()) {
		return peerdas.NewColumnIndices(), nil
	}

	// If there are any commitments in the block, there are blobs,
	// and if there are blobs, we need the columns bisecting those blobs.
	commitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}
	// No DA check needed if the block has no blobs.
	if len(commitments) == 0 {
		return peerdas.NewColumnIndices(), nil
	}

	return s.custody.required()
}

// verifyAndSave calls Save on the column store if the columns pass verification.
func (s *LazilyPersistentStoreColumn) verifyAndSave(columns []blocks.RODataColumn) error {
	if len(columns) == 0 {
		return nil
	}
	verified, err := s.verifyColumns(columns)
	if err != nil {
		return errors.Wrap(err, "verify columns")
	}
	if err := s.store.Save(verified); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	return nil
}

func (s *LazilyPersistentStoreColumn) verifyColumns(columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	if len(columns) == 0 {
		return nil, nil
	}
	verifier := s.newDataColumnsVerifier(columns, verification.ByRangeRequestDataColumnSidecarRequirements)
	if err := verifier.ValidFields(); err != nil {
		return nil, errors.Wrap(err, "valid fields")
	}
	if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
return verifier.VerifiedRODataColumns()
|
||||
}
|
||||
|
||||
// bisectVerification is used when verification of a batch of columns fails. Since the batch could
|
||||
// span multiple blocks or have been fetched from multiple peers, this pattern enables code using the
|
||||
// store to break the verification into smaller units and learn the results, in order to plan to retry
|
||||
// retrieval of the unusable columns.
|
||||
func (s *LazilyPersistentStoreColumn) bisectVerification(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.bisector == nil {
|
||||
return errors.New("bisector not initialized")
|
||||
}
|
||||
iter, err := s.bisector.Bisect(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Bisector.Bisect")
|
||||
}
|
||||
// It's up to the bisector how to chunk up columns for verification,
|
||||
// which could be by block, or by peer, or any other strategy.
|
||||
// For the purposes of range syncing or backfill this will be by peer,
|
||||
// so that the node can learn which peer is giving us bad data and downscore them.
|
||||
for columns, err := iter.Next(); columns != nil; columns, err = iter.Next() {
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return errors.Wrap(err, "Bisector.Next")
|
||||
}
|
||||
break // io.EOF signals end of iteration
|
||||
}
|
||||
// We save the parts of the batch that have been verified successfully even though we don't know
|
||||
// if all columns for the block will be available until the block is imported.
|
||||
if err := s.verifyAndSave(s.columnsNotStored(columns)); err != nil {
|
||||
iter.OnError(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// This should give us a single error representing any unresolved errors seen via onError.
|
||||
return iter.Error()
|
||||
}
|
||||
|
||||
// columnsNotStored filters the list of ROColumnSidecars to only include those that are not found in the storage summary.
|
||||
func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataColumn) []blocks.RODataColumn {
|
||||
// We use this method to filter a set of sidecars that were previously seen to be unavailable on disk. So our base assumption
|
||||
// is that they are still available and we don't need to copy the list. Instead we make a slice of any indices that are unexpectedly
|
||||
// stored and only when we find that the storage view has changed do we need to create a new slice.
|
||||
stored := make(map[int]struct{}, 0)
|
||||
lastRoot := [32]byte{}
|
||||
var sum filesystem.DataColumnStorageSummary
|
||||
for i, sc := range sidecars {
|
||||
if sc.BlockRoot() != lastRoot {
|
||||
sum = s.store.Summary(sc.BlockRoot())
|
||||
lastRoot = sc.BlockRoot()
|
||||
}
|
||||
if sum.HasIndex(sc.Index) {
|
||||
stored[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
// If the view on storage hasn't changed, return the original list.
|
||||
if len(stored) == 0 {
|
||||
return sidecars
|
||||
}
|
||||
shift := 0
|
||||
for i := range sidecars {
|
||||
if _, ok := stored[i]; ok {
|
||||
// If the index is stored, skip and overwrite it.
|
||||
// Track how many spaces down to shift unseen sidecars (to overwrite the previously shifted or seen).
|
||||
shift++
|
||||
continue
|
||||
}
|
||||
if shift > 0 {
|
||||
// If the index is not stored and we have seen stored indices,
|
||||
// we need to shift the current index down.
|
||||
sidecars[i-shift] = sidecars[i]
|
||||
}
|
||||
}
|
||||
return sidecars[:len(sidecars)-shift]
|
||||
}
|
||||
|
||||
type custodyRequirement struct {
|
||||
nodeID enode.ID
|
||||
cgc uint64 // custody group count
|
||||
indices peerdas.ColumnIndices
|
||||
}
|
||||
|
||||
func (c *custodyRequirement) required() (peerdas.ColumnIndices, error) {
|
||||
peerInfo, _, err := peerdas.Info(c.nodeID, c.cgc)
|
||||
if err != nil {
|
||||
return peerdas.NewColumnIndices(), errors.Wrap(err, "peer info")
|
||||
}
|
||||
return peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns), nil
|
||||
}
|
||||
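For orientation, the following is a minimal sketch of how a batch-sync caller might wire the store above together. The constructor, Persist, and IsDataAvailable signatures come from this diff; the wrapper function, its name, and the assumption that the storage, verifier factory, bisector, retention checker, and node identity are built elsewhere are illustrative only and not part of the change.

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// checkBatch stashes the sidecars fetched for a batch of blocks and then runs the
// DA check before the blocks are imported. Columns only reach disk inside
// IsDataAvailable, after verification; a failed batch is retried through the Bisector.
func checkBatch(
	ctx context.Context,
	store *filesystem.DataColumnStorage,
	newVerifier verification.NewDataColumnsVerifier,
	nodeID enode.ID,
	cgc uint64,
	bisector das.Bisector,
	shouldRetain das.RetentionChecker,
	current primitives.Slot,
	sidecars []blocks.RODataColumn,
	blks []blocks.ROBlock,
) error {
	avs := das.NewLazilyPersistentStoreColumn(store, newVerifier, nodeID, cgc, bisector, shouldRetain)

	// Persist only stashes sidecars in the in-memory cache; nothing is written yet.
	if err := avs.Persist(current, sidecars...); err != nil {
		return err
	}

	// IsDataAvailable verifies and saves the custodied columns for each block,
	// bisecting the batch when verification of the whole batch fails.
	return avs.IsDataAvailable(ctx, current, blks...)
}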
908
beacon-chain/das/availability_columns_test.go
Normal file
@@ -0,0 +1,908 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func mockShouldRetain(current primitives.Epoch) RetentionChecker {
|
||||
return func(slot primitives.Slot) bool {
|
||||
return params.WithinDAPeriod(slots.ToEpoch(slot), current)
|
||||
}
|
||||
}
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
var current primitives.Slot = 1_000_000
|
||||
sr := mockShouldRetain(slots.ToEpoch(current))
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, sr)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(current, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(roSidecars), len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
store := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
avs := NewLazilyPersistentStoreColumn(store, nil, enode.ID{}, 0, nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
|
||||
err := avs.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(avs.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := avs.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
summary := store.Summary(key.root)
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
require.Equal(t, len(roSidecars), len(entry.scs))
|
||||
|
||||
idx1 := entry.scs[1]
|
||||
require.NotNil(t, idx1)
|
||||
require.DeepSSZEqual(t, roDataColumns[0].BlockRoot(), idx1.BlockRoot())
|
||||
idx5 := entry.scs[5]
|
||||
require.NotNil(t, idx5)
|
||||
require.DeepSSZEqual(t, roDataColumns[1].BlockRoot(), idx5.BlockRoot())
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[uint64]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, newDataColumnsVerifier, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Slot = primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
indices := []uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
avs := NewLazilyPersistentStoreColumn(storage, newDataColumnsVerifier, enode.ID{}, uint64(len(indices)), nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
dcparams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dcparams = append(dcparams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dcparams)
|
||||
|
||||
key := keyFromBlock(signedRoBlock)
|
||||
entry := avs.cache.entry(key)
|
||||
defer avs.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := storage.Get(root, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
//summary := storage.Summary(root)
|
||||
require.Equal(t, len(verifiedRoDataColumns), len(actual))
|
||||
//require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
//require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRetentionWindow(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
fuluSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
numberOfColumns := fieldparams.NumberOfColumns
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
wantedCols int
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: fuluSlot + windowSlots,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = fuluSlot + windowSlots - 1
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: fuluSlot + windowSlots,
|
||||
wantedCols: numberOfColumns,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, nil, enode.ID{}, uint64(numberOfColumns), nil, mockShouldRetain(slots.ToEpoch(tc.slot)))
|
||||
|
||||
indices, err := s.required(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.wantedCols, len(indices))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock any) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
|
||||
// Mock implementations for bisectVerification tests
|
||||
|
||||
// mockBisectionIterator simulates a BisectionIterator for testing.
|
||||
type mockBisectionIterator struct {
|
||||
chunks [][]blocks.RODataColumn
|
||||
chunkErrors []error
|
||||
finalError error
|
||||
chunkIndex int
|
||||
nextCallCount int
|
||||
onErrorCallCount int
|
||||
onErrorErrors []error
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Next() ([]blocks.RODataColumn, error) {
|
||||
if m.chunkIndex >= len(m.chunks) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
chunk := m.chunks[m.chunkIndex]
|
||||
var err error
|
||||
if m.chunkIndex < len(m.chunkErrors) {
|
||||
err = m.chunkErrors[m.chunkIndex]
|
||||
}
|
||||
m.chunkIndex++
|
||||
m.nextCallCount++
|
||||
if err != nil {
|
||||
return chunk, err
|
||||
}
|
||||
return chunk, nil
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) OnError(err error) {
|
||||
m.onErrorCallCount++
|
||||
m.onErrorErrors = append(m.onErrorErrors, err)
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Error() error {
|
||||
return m.finalError
|
||||
}
|
||||
|
||||
// mockBisector simulates a Bisector for testing.
|
||||
type mockBisector struct {
|
||||
shouldError bool
|
||||
bisectErr error
|
||||
iterator *mockBisectionIterator
|
||||
}
|
||||
|
||||
func (m *mockBisector) Bisect(columns []blocks.RODataColumn) (BisectionIterator, error) {
|
||||
if m.shouldError {
|
||||
return nil, m.bisectErr
|
||||
}
|
||||
return m.iterator, nil
|
||||
}
|
||||
|
||||
// testDataColumnsVerifier implements verification.DataColumnsVerifier for testing.
|
||||
type testDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
shouldFail bool
|
||||
columns []blocks.RODataColumn
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
verified := make([]blocks.VerifiedRODataColumn, len(v.columns))
|
||||
for i, col := range v.columns {
|
||||
verified[i] = blocks.NewVerifiedRODataColumn(col)
|
||||
}
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
func (v *testDataColumnsVerifier) ValidFields() error {
|
||||
if v.shouldFail {
|
||||
return errors.New("verification failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) CorrectSubnet(string, []string) error { return nil }
|
||||
func (v *testDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) ValidProposerSignature(context.Context) error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarParentSeen(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentValid(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarInclusionProven() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarKzgProofVerified() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarProposerExpected(context.Context) error { return nil }
|
||||
|
||||
// Helper function to create test data columns
|
||||
func makeTestDataColumns(t *testing.T, count int, blockRoot [32]byte, startIndex uint64) []blocks.RODataColumn {
|
||||
columns := make([]blocks.RODataColumn, 0, count)
|
||||
for i := range count {
|
||||
params := util.DataColumnParam{
|
||||
Index: startIndex + uint64(i),
|
||||
KzgCommitments: commitments,
|
||||
Slot: primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
_, verifiedCols := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{params})
|
||||
if len(verifiedCols) > 0 {
|
||||
columns = append(columns, verifiedCols[0].RODataColumn)
|
||||
}
|
||||
}
|
||||
return columns
|
||||
}
|
||||
|
||||
// Helper function to create test verifier factory with failure pattern
|
||||
func makeTestVerifierFactory(failurePattern []bool) verification.NewDataColumnsVerifier {
|
||||
callIndex := 0
|
||||
return func(cols []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
shouldFail := callIndex < len(failurePattern) && failurePattern[callIndex]
|
||||
callIndex++
|
||||
return &testDataColumnsVerifier{
|
||||
shouldFail: shouldFail,
|
||||
columns: cols,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBisectVerification tests the bisectVerification method with comprehensive table-driven test cases.
|
||||
func TestBisectVerification(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
cases := []struct {
|
||||
expectedError bool
|
||||
bisectorNil bool
|
||||
expectedOnErrorCallCount int
|
||||
expectedNextCallCount int
|
||||
inputCount int
|
||||
iteratorFinalError error
|
||||
bisectorError error
|
||||
name string
|
||||
storedColumnIndices []uint64
|
||||
verificationFailurePattern []bool
|
||||
chunkErrors []error
|
||||
chunks [][]blocks.RODataColumn
|
||||
}{
|
||||
{
|
||||
name: "EmptyColumns",
|
||||
inputCount: 0,
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "NilBisector",
|
||||
inputCount: 3,
|
||||
bisectorNil: true,
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "BisectError",
|
||||
inputCount: 5,
|
||||
bisectorError: errors.New("bisect failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkSuccess",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkFails",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("chunk failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothPass",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_FirstFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_SecondFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, true},
|
||||
iteratorFinalError: errors.New("second failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothFail",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, true},
|
||||
iteratorFinalError: errors.New("both failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_AllPass",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_MixedFail",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, true, false, true},
|
||||
iteratorFinalError: errors.New("mixed failures"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_PartialFilter",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 3},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_AllStored",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 1, 2, 3, 4, 5},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_MixedAccess",
|
||||
inputCount: 10,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 5, 9},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextError",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
chunkErrors: []error{nil, errors.New("next error")},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextEOF",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "LargeChunkSize",
|
||||
inputCount: 128,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManySmallChunks",
|
||||
inputCount: 32,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}, {}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false, false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 9,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ChunkWithSomeStoredColumns",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 2, 4},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "OnErrorDoesNotStopIteration",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "VerificationErrorWrapping",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("verification failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Setup storage
|
||||
var store *filesystem.DataColumnStorage
|
||||
if len(tc.storedColumnIndices) > 0 {
|
||||
mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.storedColumnIndices...))
|
||||
store = s
|
||||
} else {
|
||||
store = filesystem.NewEphemeralDataColumnStorage(t)
|
||||
}
|
||||
|
||||
// Create test columns
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
columns := makeTestDataColumns(t, tc.inputCount, blockRoot, 0)
|
||||
|
||||
// Setup iterator with chunks
|
||||
iterator := &mockBisectionIterator{
|
||||
chunks: tc.chunks,
|
||||
chunkErrors: tc.chunkErrors,
|
||||
finalError: tc.iteratorFinalError,
|
||||
}
|
||||
|
||||
// Setup bisector
|
||||
var bisector Bisector
|
||||
if tc.bisectorNil || tc.inputCount == 0 {
|
||||
bisector = nil
|
||||
} else if tc.bisectorError != nil {
|
||||
bisector = &mockBisector{
|
||||
shouldError: true,
|
||||
bisectErr: tc.bisectorError,
|
||||
}
|
||||
} else {
|
||||
bisector = &mockBisector{
|
||||
shouldError: false,
|
||||
iterator: iterator,
|
||||
}
|
||||
}
|
||||
|
||||
// Create store with verifier
|
||||
verifierFactory := makeTestVerifierFactory(tc.verificationFailurePattern)
|
||||
lazilyPersistentStore := &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newDataColumnCache(),
|
||||
newDataColumnsVerifier: verifierFactory,
|
||||
custody: &custodyRequirement{},
|
||||
bisector: bisector,
|
||||
}
|
||||
|
||||
// Execute
|
||||
err := lazilyPersistentStore.bisectVerification(columns)
|
||||
|
||||
// Assert
|
||||
if tc.expectedError {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify iterator interactions for non-error cases
|
||||
if tc.inputCount > 0 && bisector != nil && tc.bisectorError == nil && !tc.expectedError {
|
||||
require.NotEqual(t, 0, iterator.nextCallCount, "iterator Next() should have been called")
|
||||
require.Equal(t, tc.expectedOnErrorCallCount, iterator.onErrorCallCount, "OnError() call count mismatch")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func allIndicesExcept(total int, excluded []uint64) []uint64 {
|
||||
excludeMap := make(map[uint64]bool)
|
||||
for _, idx := range excluded {
|
||||
excludeMap[idx] = true
|
||||
}
|
||||
|
||||
var result []uint64
|
||||
for i := range total {
|
||||
if !excludeMap[uint64(i)] {
|
||||
result = append(result, uint64(i))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// TestColumnsNotStored tests the columnsNotStored method.
|
||||
func TestColumnsNotStored(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
count int
|
||||
stored []uint64 // Column indices marked as stored
|
||||
expected []uint64 // Expected column indices in returned result
|
||||
}{
|
||||
// Empty cases
|
||||
{
|
||||
name: "EmptyInput",
|
||||
count: 0,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// Single element cases
|
||||
{
|
||||
name: "SingleElement_NotStored",
|
||||
count: 1,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{0},
|
||||
},
|
||||
{
|
||||
name: "SingleElement_Stored",
|
||||
count: 1,
|
||||
stored: []uint64{0},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// All not stored cases
|
||||
{
|
||||
name: "AllNotStored_FiveElements",
|
||||
count: 5,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{0, 1, 2, 3, 4},
|
||||
},
|
||||
// All stored cases
|
||||
{
|
||||
name: "AllStored",
|
||||
count: 5,
|
||||
stored: []uint64{0, 1, 2, 3, 4},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// Partial storage - beginning
|
||||
{
|
||||
name: "StoredAtBeginning",
|
||||
count: 5,
|
||||
stored: []uint64{0, 1},
|
||||
expected: []uint64{2, 3, 4},
|
||||
},
|
||||
// Partial storage - end
|
||||
{
|
||||
name: "StoredAtEnd",
|
||||
count: 5,
|
||||
stored: []uint64{3, 4},
|
||||
expected: []uint64{0, 1, 2},
|
||||
},
|
||||
// Partial storage - middle
|
||||
{
|
||||
name: "StoredInMiddle",
|
||||
count: 5,
|
||||
stored: []uint64{2},
|
||||
expected: []uint64{0, 1, 3, 4},
|
||||
},
|
||||
// Partial storage - scattered
|
||||
{
|
||||
name: "StoredScattered",
|
||||
count: 8,
|
||||
stored: []uint64{1, 3, 5},
|
||||
expected: []uint64{0, 2, 4, 6, 7},
|
||||
},
|
||||
// Alternating pattern
|
||||
{
|
||||
name: "AlternatingPattern",
|
||||
count: 8,
|
||||
stored: []uint64{0, 2, 4, 6},
|
||||
expected: []uint64{1, 3, 5, 7},
|
||||
},
|
||||
// Consecutive stored
|
||||
{
|
||||
name: "ConsecutiveStored",
|
||||
count: 10,
|
||||
stored: []uint64{3, 4, 5, 6},
|
||||
expected: []uint64{0, 1, 2, 7, 8, 9},
|
||||
},
|
||||
// Large slice cases
|
||||
{
|
||||
name: "LargeSlice_NoStored",
|
||||
count: 64,
|
||||
stored: []uint64{},
|
||||
expected: allIndicesExcept(64, []uint64{}),
|
||||
},
|
||||
{
|
||||
name: "LargeSlice_SingleStored",
|
||||
count: 64,
|
||||
stored: []uint64{32},
|
||||
expected: allIndicesExcept(64, []uint64{32}),
|
||||
},
|
||||
}
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create test columns first to get the actual block root
|
||||
var columns []blocks.RODataColumn
|
||||
if tc.count > 0 {
|
||||
columns = makeTestDataColumns(t, tc.count, [32]byte{}, 0)
|
||||
}
|
||||
|
||||
// Get the actual block root from the first column (if any)
|
||||
var blockRoot [32]byte
|
||||
if len(columns) > 0 {
|
||||
blockRoot = columns[0].BlockRoot()
|
||||
}
|
||||
|
||||
// Setup storage
|
||||
var store *filesystem.DataColumnStorage
|
||||
if len(tc.stored) > 0 {
|
||||
mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
|
||||
require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.stored...))
|
||||
store = s
|
||||
} else {
|
||||
store = filesystem.NewEphemeralDataColumnStorage(t)
|
||||
}
|
||||
|
||||
// Create store instance
|
||||
lazilyPersistentStore := &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
}
|
||||
|
||||
// Execute
|
||||
result := lazilyPersistentStore.columnsNotStored(columns)
|
||||
|
||||
// Assert count
|
||||
require.Equal(t, len(tc.expected), len(result),
|
||||
fmt.Sprintf("expected %d columns, got %d", len(tc.expected), len(result)))
|
||||
|
||||
// Verify that no stored columns are in the result
|
||||
if len(tc.stored) > 0 {
|
||||
resultIndices := make(map[uint64]bool)
|
||||
for _, col := range result {
|
||||
resultIndices[col.Index] = true
|
||||
}
|
||||
for _, storedIdx := range tc.stored {
|
||||
require.Equal(t, false, resultIndices[storedIdx],
|
||||
fmt.Sprintf("stored column index %d should not be in result", storedIdx))
|
||||
}
|
||||
}
|
||||
|
||||
// If expectedIndices is specified, verify the exact column indices in order
|
||||
if len(tc.expected) > 0 && len(tc.stored) == 0 {
|
||||
// Only check exact order for non-stored cases (where we know they stay in same order)
|
||||
for i, expectedIdx := range tc.expected {
|
||||
require.Equal(t, columns[expectedIdx].Index, result[i].Index,
|
||||
fmt.Sprintf("column %d: expected index %d, got %d", i, columns[expectedIdx].Index, result[i].Index))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify optimization: if nothing stored, should return original slice
|
||||
if len(tc.stored) == 0 && tc.count > 0 {
|
||||
require.Equal(t, &columns[0], &result[0],
|
||||
"when no columns stored, should return original slice (same pointer)")
|
||||
}
|
||||
|
||||
// Verify optimization: if some stored, result should use in-place shifting
|
||||
if len(tc.stored) > 0 && len(tc.expected) > 0 && tc.count > 0 {
|
||||
require.Equal(t, cap(columns), cap(result),
|
||||
"result should be in-place shifted from original (same capacity)")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
40
beacon-chain/das/bisect.go
Normal file
@@ -0,0 +1,40 @@
package das

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// Bisector describes a type that takes a set of RODataColumns via the Bisect method
// and returns a BisectionIterator that returns batches of those columns to be
// verified together.
type Bisector interface {
	// Bisect initializes the BisectionIterator and returns the result.
	Bisect([]blocks.RODataColumn) (BisectionIterator, error)
}

// BisectionIterator describes an iterator that returns groups of columns to verify.
// It is up to the bisector implementation to decide how to chunk up the columns,
// whether by block, by peer, or any other strategy. For example, backfill implements
// a bisector that keeps track of the source of each sidecar by peer, and groups
// sidecars by peer in the Next method, enabling it to track which peers, out of all
// the peers contributing to a batch, gave us bad data.
// When a batch fails, the OnError method should be used so that the bisector can
// keep track of the failed groups of columns and e.g. apply that knowledge in peer scoring.
// The same column will be returned multiple times by Next; first as part of a larger batch,
// and again as part of a more fine-grained batch if there was an error in the large batch.
// For example, first as part of a batch of all columns spanning peers, and then again
// as part of a batch of columns from a single peer if some column in the larger batch
// failed verification.
type BisectionIterator interface {
	// Next returns the next group of columns to verify.
	// When the iteration is complete, Next should return (nil, io.EOF).
	Next() ([]blocks.RODataColumn, error)
	// OnError should be called when verification of a group of columns obtained via Next() fails.
	OnError(error)
	// Error can be used at the end of the iteration to get a single error result. It will return
	// nil if OnError was never called, or an error of the implementer's choosing representing the set
	// of errors seen during iteration. For instance, when bisecting from columns spanning peers to columns
	// from a single peer, the broader error could be dropped, and then the more specific error
	// (for a single peer's response) returned after bisecting to it.
	Error() error
}
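The interfaces above leave the grouping strategy entirely to the implementation. As a rough illustration of the Next/OnError/Error contract only, a toy bisector that groups columns by block root might look like the sketch below; the real backfill bisector groups by the peer that served each sidecar and feeds failures into peer scoring, and every name here is hypothetical.

package example

import (
	"io"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/pkg/errors"
)

// blockBisector is a toy Bisector that groups columns by block root.
type blockBisector struct{}

func (blockBisector) Bisect(cols []blocks.RODataColumn) (das.BisectionIterator, error) {
	groups := make(map[[32]byte][]blocks.RODataColumn)
	order := make([][32]byte, 0)
	for _, c := range cols {
		r := c.BlockRoot()
		if _, seen := groups[r]; !seen {
			order = append(order, r)
		}
		groups[r] = append(groups[r], c)
	}
	it := &blockIterator{}
	for _, r := range order {
		it.chunks = append(it.chunks, groups[r])
	}
	return it, nil
}

type blockIterator struct {
	chunks [][]blocks.RODataColumn
	pos    int
	errs   []error
}

// Next returns one group per call and (nil, io.EOF) once the groups are exhausted.
func (it *blockIterator) Next() ([]blocks.RODataColumn, error) {
	if it.pos >= len(it.chunks) {
		return nil, io.EOF
	}
	chunk := it.chunks[it.pos]
	it.pos++
	return chunk, nil
}

// OnError records a failed group so Error can report it after iteration.
func (it *blockIterator) OnError(err error) { it.errs = append(it.errs, err) }

// Error collapses any recorded failures into a single error.
func (it *blockIterator) Error() error {
	if len(it.errs) == 0 {
		return nil
	}
	return errors.Errorf("%d column group(s) failed verification", len(it.errs))
}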
@@ -76,7 +76,7 @@ func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
		e.scs = make([]*blocks.ROBlob, maxBlobsPerBlock)
	}
	if e.scs[sc.Index] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
		return errors.Wrapf(errDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
	}
	e.scs[sc.Index] = sc
	return nil
@@ -88,7 +88,7 @@ func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte) ([]blocks.ROBlob, error) {
	count := len(kc)
	if e.diskSummary.AllAvailable(count) {
		return nil, nil

@@ -34,7 +34,8 @@ type filterTestCaseSetupFunc func(t *testing.T) (*blobCacheEntry, [][]byte, []bl
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
	return func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
		blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
		commits, err := commitmentsToCheck(blk, blk.Block().Slot())
		shouldRetain := func(s primitives.Slot) bool { return true }
		commits, err := commitmentsToCheck(blk, shouldRetain)
		require.NoError(t, err)
		entry := &blobCacheEntry{}
		if len(onDisk) > 0 {
@@ -113,7 +114,7 @@ func TestFilterDiskSummary(t *testing.T) {
		t.Run(c.name, func(t *testing.T) {
			entry, commits, expected := c.setup(t)
			// first (root) argument doesn't matter, it is just for logs
			got, err := entry.filter([32]byte{}, commits, 100)
			got, err := entry.filter([32]byte{}, commits)
			require.NoError(t, err)
			require.Equal(t, len(expected), len(got))
		})
@@ -195,7 +196,7 @@ func TestFilter(t *testing.T) {
		t.Run(c.name, func(t *testing.T) {
			entry, commits, expected := c.setup(t)
			// first (root) argument doesn't matter, it is just for logs
			got, err := entry.filter([32]byte{}, commits, 100)
			got, err := entry.filter([32]byte{}, commits)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
@@ -1,9 +1,7 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -11,9 +9,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errColumnIndexTooHigh = errors.New("column index too high")
|
||||
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
|
||||
errCommitmentMismatch = errors.New("commitment of sidecar in cache did not match block commitment")
|
||||
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
|
||||
)
|
||||
|
||||
@@ -25,107 +23,80 @@ func newDataColumnCache() *dataColumnCache {
|
||||
return &dataColumnCache{entries: make(map[cacheKey]*dataColumnCacheEntry)}
|
||||
}
|
||||
|
||||
// ensure returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *dataColumnCache) ensure(key cacheKey) *dataColumnCacheEntry {
|
||||
// entry returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *dataColumnCache) entry(key cacheKey) *dataColumnCacheEntry {
|
||||
entry, ok := c.entries[key]
|
||||
if !ok {
|
||||
entry = &dataColumnCacheEntry{}
|
||||
entry = newDataColumnCacheEntry(key.root)
|
||||
c.entries[key] = entry
|
||||
}
|
||||
|
||||
return entry
|
||||
}
|
||||
|
||||
func (c *dataColumnCache) cleanup(blks []blocks.ROBlock) {
|
||||
for _, block := range blks {
|
||||
key := cacheKey{slot: block.Block().Slot(), root: block.Root()}
|
||||
c.delete(key)
|
||||
}
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *dataColumnCache) delete(key cacheKey) {
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type dataColumnCacheEntry struct {
|
||||
scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.DataColumnStorageSummary
|
||||
func (c *dataColumnCache) stash(sc blocks.RODataColumn) error {
|
||||
key := cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
entry := c.entry(key)
|
||||
return entry.stash(sc)
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
|
||||
e.diskSummary = sum
|
||||
func newDataColumnCacheEntry(root [32]byte) *dataColumnCacheEntry {
|
||||
return &dataColumnCacheEntry{scs: make(map[uint64]blocks.RODataColumn), root: &root}
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry is the set of RODataColumns for a given block.
|
||||
type dataColumnCacheEntry struct {
|
||||
root *[32]byte
|
||||
scs map[uint64]blocks.RODataColumn
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of DataColumnSidecars.
|
||||
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
|
||||
// stash will return an error if the given data colunn is already in the cache, or if the Index is out of bounds.
|
||||
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
|
||||
// stash will return an error if the given data column Index is out of bounds.
|
||||
// It will overwrite any existing entry for the same index.
|
||||
func (e *dataColumnCacheEntry) stash(sc blocks.RODataColumn) error {
|
||||
if sc.Index >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
|
||||
}
|
||||
|
||||
if e.scs[sc.Index] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
|
||||
}
|
||||
|
||||
e.scs[sc.Index] = sc
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
// append appends the requested root and indices from the cache to the given sidecars slice and returns the result.
|
||||
// If any of the given indices are missing, an error will be returned and the sidecars slice will be unchanged.
|
||||
func (e *dataColumnCacheEntry) append(sidecars []blocks.RODataColumn, indices peerdas.ColumnIndices) ([]blocks.RODataColumn, error) {
|
||||
needed := indices.ToMap()
|
||||
for col := range needed {
|
||||
_, ok := e.scs[col]
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", e.root, col)
|
||||
}
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range nonEmptyIndices {
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !sliceBytesEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.scs[i])
|
||||
// Loop twice so we can avoid touching the slice if any of the blobs are missing.
|
||||
for col := range needed {
|
||||
sidecars = append(sidecars, e.scs[col])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
// IndicesNotStored filters the list of indices to only include those that are not found in the storage summary.
|
||||
func IndicesNotStored(sum filesystem.DataColumnStorageSummary, indices peerdas.ColumnIndices) peerdas.ColumnIndices {
|
||||
indices = indices.Copy()
|
||||
for col := range indices {
|
||||
if sum.HasIndex(col) {
|
||||
indices.Unset(col)
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func sliceBytesEqual(a, b [][]byte) bool {
|
||||
return slices.EqualFunc(a, b, bytes.Equal)
|
||||
return indices
|
||||
}
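The IndicesNotStored helper introduced above is the piece that narrows a block's custody requirement down to the columns still missing from disk. As a small, hypothetical illustration (the function, the summary accessor, and the peerdas types come from this diff; the wrapper and its name do not), it can be used like this:

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
)

// missingIndices returns only the required column indices not yet on disk for the
// given block root, mirroring how IsDataAvailable narrows its verification set.
func missingIndices(store *filesystem.DataColumnStorage, root [32]byte, required peerdas.ColumnIndices) peerdas.ColumnIndices {
	sum := store.Summary(root)
	return das.IndicesNotStored(sum, required)
}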
@@ -1,8 +1,10 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -13,124 +15,105 @@ import (
|
||||
func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
c := newDataColumnCache()
|
||||
key := cacheKey{}
|
||||
entry := c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
entry := c.entry(key)
|
||||
require.Equal(t, 0, len(entry.scs))
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
|
||||
entry.setDiskSummary(diskSummary)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
|
||||
nonDupe := c.entry(key)
|
||||
require.Equal(t, entry, nonDupe) // same pointer
|
||||
expect, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
require.NoError(t, entry.stash(expect[0]))
|
||||
require.Equal(t, 1, len(entry.scs))
|
||||
cols, err := nonDupe.append([]blocks.RODataColumn{}, peerdas.NewColumnIndicesFromSlice([]uint64{expect[0].Index}))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expect[0], cols[0])
|
||||
|
||||
c.delete(key)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
entry = c.entry(key)
|
||||
require.Equal(t, 0, len(entry.scs))
|
||||
require.NotEqual(t, entry, nonDupe) // different pointer
|
||||
}
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
columns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
err := entry.stash(columns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal and already existing", func(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
entry := newDataColumnCacheEntry(roDataColumns[0].BlockRoot())
|
||||
err := entry.stash(roDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
|
||||
err = entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
require.NoError(t, entry.stash(roDataColumns[0]))
|
||||
// stash simply replaces duplicate values now
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilterDataColumns(t *testing.T) {
|
||||
func TestAppendDataColumns(t *testing.T) {
|
||||
t.Run("All available", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
notStored := IndicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1, 3}))
|
||||
actual, err := newDataColumnCacheEntry([32]byte{}).append([]blocks.RODataColumn{}, notStored)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("Some scs missing", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
|
||||
notStored := IndicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1}))
|
||||
actual, err := newDataColumnCacheEntry([32]byte{}).append([]blocks.RODataColumn{}, notStored)
|
||||
require.Equal(t, 0, len(actual))
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
indices := peerdas.NewColumnIndicesFromSlice([]uint64{1, 3})
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
scs := map[uint64]blocks.RODataColumn{
|
||||
3: expected[0],
|
||||
}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
entry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
|
||||
actual, err := entry.append([]blocks.RODataColumn{}, IndicesNotStored(sum, indices))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
require.Equal(t, 2, s.count())
|
||||
}
|
||||
t.Run("Append does not mutate the input", func(t *testing.T) {
|
||||
indices := peerdas.NewColumnIndicesFromSlice([]uint64{1, 2})
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Index: 0, KzgCommitments: [][]byte{[]byte{1}}},
|
||||
{Index: 1, KzgCommitments: [][]byte{[]byte{2}}},
|
||||
{Index: 2, KzgCommitments: [][]byte{[]byte{3}}},
|
||||
})
|
||||
|
||||
func TestNonEmptyIndices(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
|
||||
actual := s.nonEmptyIndices()
|
||||
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
|
||||
}
|
||||
scs := map[uint64]blocks.RODataColumn{
|
||||
1: expected[1],
|
||||
2: expected[2],
|
||||
}
|
||||
entry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
func TestSliceBytesEqual(t *testing.T) {
|
||||
t.Run("Different lengths", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Same length but different content", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 7}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Equal slices", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, true, sliceBytesEqual(a, b))
|
||||
original := []blocks.RODataColumn{expected[0]}
|
||||
actual, err := entry.append(original, indices)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
slices.SortFunc(actual, func(i, j blocks.RODataColumn) int {
|
||||
return int(i.Index) - int(j.Index)
|
||||
})
|
||||
for i := range expected {
|
||||
require.Equal(t, expected[i].Index, actual[i].Index)
|
||||
}
|
||||
require.Equal(t, 1, len(original))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
// verified and saved sidecars.
// Persist guarantees that the sidecar will be available to perform a DA check
// for the life of the beacon node process.
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
// By convention there is a concept of an AvailabilityStore that implements a method to persist
// blobs or data columns to prepare for Availability checking, but since those methods are different
// for different forms of blob data, they are not included in the interface.
type AvailabilityChecker interface {
	IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
}

// RetentionChecker is a callback that determines whether blobs at the given slot are within the retention period.
type RetentionChecker func(primitives.Slot) bool

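For orientation, here is a minimal sketch of how a caller might lean on the narrower interface together with a retention callback. It is an illustration only and not part of the diff: the helper name and the way the callback is obtained are assumptions.

// Sketch only: a consumer that needs DA checks but never persists sidecars can depend on
// das.AvailabilityChecker plus a das.RetentionChecker instead of a full store implementation.
func checkIfNeeded(ctx context.Context, checker das.AvailabilityChecker, within das.RetentionChecker,
	current primitives.Slot, blk blocks.ROBlock) error {
	if !within(blk.Block().Slot()) {
		return nil // outside the retention window, nothing to verify
	}
	return checker.IsDataAvailable(ctx, current, blk)
}
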
5
beacon-chain/das/log.go
Normal file
@@ -0,0 +1,5 @@
package das

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "das")
@@ -9,16 +9,20 @@ import (

// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
	VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
	VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
	ErrIsDataAvailable         error
	PersistBlobsCallback       func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}

var _ AvailabilityStore = &MockAvailabilityStore{}
var _ AvailabilityChecker = &MockAvailabilityStore{}

// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error {
	if m.ErrIsDataAvailable != nil {
		return m.ErrIsDataAvailable
	}
	if m.VerifyAvailabilityCallback != nil {
		return m.VerifyAvailabilityCallback(ctx, current, b)
		return m.VerifyAvailabilityCallback(ctx, current, b...)
	}
	return nil
}

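A brief usage sketch of the updated mock through the new variadic shape. This is an illustration only, not part of the diff; the test name and error message are placeholders.

// Sketch only: zero blocks are now a legal call thanks to the variadic signature.
func TestMockAvailabilityChecker(t *testing.T) {
	checker := &das.MockAvailabilityStore{ErrIsDataAvailable: errors.New("data not available")}
	var _ das.AvailabilityChecker = checker
	err := checker.IsDataAvailable(t.Context(), primitives.Slot(42))
	require.ErrorContains(t, "data not available", err)
}
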
135
beacon-chain/das/needs.go
Normal file
@@ -0,0 +1,135 @@
package das

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

// NeedSpan represents the need for a resource over a span of slots.
type NeedSpan struct {
	Begin primitives.Slot
	End   primitives.Slot
}

// At returns whether blocks/blobs/columns are needed at the given slot.
func (n NeedSpan) At(slot primitives.Slot) bool {
	return slot >= n.Begin && slot < n.End
}

// CurrentNeeds fields can be used to check whether the given resource type is needed
// at a given slot. The values are based on the current slot, so this value shouldn't
// be retained / reused across slots.
type CurrentNeeds struct {
	Block NeedSpan
	Blob  NeedSpan
	Col   NeedSpan
}

// SyncNeeds holds configuration and state for determining what data is needed
// at any given slot during backfill based on the current slot.
type SyncNeeds struct {
	current func() primitives.Slot
	deneb   primitives.Slot
	fulu    primitives.Slot

	oldestSlotFlagPtr  *primitives.Slot
	validOldestSlotPtr *primitives.Slot
	blockRetention     primitives.Epoch

	blobRetentionFlag primitives.Epoch
	blobRetention     primitives.Epoch
	colRetention      primitives.Epoch
}

type CurrentSlotter func() primitives.Slot

func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, blobRetentionFlag primitives.Epoch) (SyncNeeds, error) {
	deneb, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
	if err != nil {
		return SyncNeeds{}, errors.Wrap(err, "deneb fork slot")
	}
	fuluBoundary := min(params.BeaconConfig().FuluForkEpoch, slots.MaxSafeEpoch())
	fulu, err := slots.EpochStart(fuluBoundary)
	if err != nil {
		return SyncNeeds{}, errors.Wrap(err, "fulu fork slot")
	}
	sn := SyncNeeds{
		current:           func() primitives.Slot { return current() },
		deneb:             deneb,
		fulu:              fulu,
		blobRetentionFlag: blobRetentionFlag,
	}
	// We apply the --blob-retention-epochs flag to both blob and column retention.
	sn.blobRetention = max(sn.blobRetentionFlag, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
	sn.colRetention = max(sn.blobRetentionFlag, params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)

	// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
	sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)

	if oldestSlotFlagPtr != nil {
		if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
			sn.validOldestSlotPtr = oldestSlotFlagPtr
		} else {
			log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
				WithField("specMinSlot", syncEpochOffset(current(), sn.blockRetention)).
				Warn("Ignoring user-specified slot > MIN_EPOCHS_FOR_BLOCK_REQUESTS.")
		}
	}

	return sn, nil
}

// Currently is the main callback given to the different parts of backfill to determine
// what resources are needed at a given slot. It assumes the current instance of SyncNeeds
// is the result of calling initialize.
func (n SyncNeeds) Currently() CurrentNeeds {
	current := n.current()
	c := CurrentNeeds{
		Block: n.blockSpan(current),
		Blob:  NeedSpan{Begin: syncEpochOffset(current, n.blobRetention), End: n.fulu},
		Col:   NeedSpan{Begin: syncEpochOffset(current, n.colRetention), End: current},
	}
	// Adjust the minimums forward to the slots where the sidecar types were introduced
	c.Blob.Begin = max(c.Blob.Begin, n.deneb)
	c.Col.Begin = max(c.Col.Begin, n.fulu)

	return c
}

func (n SyncNeeds) blockSpan(current primitives.Slot) NeedSpan {
	if n.validOldestSlotPtr != nil { // assumes validation done in initialize()
		return NeedSpan{Begin: *n.validOldestSlotPtr, End: current}
	}
	return NeedSpan{Begin: syncEpochOffset(current, n.blockRetention), End: current}
}

func (n SyncNeeds) BlobRetentionChecker() RetentionChecker {
	return func(slot primitives.Slot) bool {
		current := n.Currently()
		return current.Blob.At(slot)
	}
}

func (n SyncNeeds) DataColumnRetentionChecker() RetentionChecker {
	return func(slot primitives.Slot) bool {
		current := n.Currently()
		return current.Col.At(slot)
	}
}

// syncEpochOffset subtracts a number of epochs as slots from the current slot, with underflow checks.
// It returns slot 1 if the result would be 0 or underflow. It doesn't return slot 0 because the
// genesis block needs to be specially synced (it doesn't have a valid signature).
func syncEpochOffset(current primitives.Slot, subtract primitives.Epoch) primitives.Slot {
	minEpoch := min(subtract, slots.MaxSafeEpoch())
	// compute slot offset - offset is a number of slots to go back from current (not an absolute slot).
	offset := slots.UnsafeEpochStart(minEpoch)
	// Underflow protection: slot 0 is the genesis block, therefore the signature in it is invalid.
	// To prevent us from rejecting a batch, we restrict the minimum backfill slot to slot 1.
	if offset >= current {
		return 1
	}
	return current - offset
}

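For context, a brief usage sketch of the new API from the perspective of backfill code. It is an illustration only, not part of the diff; the function name, the head-slot closure, and the nil/zero flag values are placeholders.

// Sketch only: wire up SyncNeeds once, then consult it per slot during backfill.
func planBackfill(headSlot func() primitives.Slot, slot primitives.Slot) (bool, error) {
	needs, err := das.NewSyncNeeds(headSlot, nil /* no --backfill-oldest-slot override */, 0 /* spec-minimum blob retention */)
	if err != nil {
		return false, err
	}
	now := needs.Currently()
	withinBlobRetention := needs.BlobRetentionChecker() // a RetentionChecker callback for blob storage
	_ = withinBlobRetention
	// Columns are only needed for slots inside the column span (post-Fulu and within retention).
	return now.Col.At(slot), nil
}
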
693
beacon-chain/das/needs_test.go
Normal file
@@ -0,0 +1,693 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// TestNeedSpanAt tests the needSpan.at() method for range checking.
|
||||
func TestNeedSpanAt(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
span NeedSpan
|
||||
slots []primitives.Slot
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "within bounds",
|
||||
span: NeedSpan{Begin: 100, End: 200},
|
||||
slots: []primitives.Slot{101, 150, 199},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "before begin / at end boundary (exclusive)",
|
||||
span: NeedSpan{Begin: 100, End: 200},
|
||||
slots: []primitives.Slot{99, 200, 201},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "empty span (begin == end)",
|
||||
span: NeedSpan{Begin: 100, End: 100},
|
||||
slots: []primitives.Slot{100},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "slot 0 with span starting at 0",
|
||||
span: NeedSpan{Begin: 0, End: 100},
|
||||
slots: []primitives.Slot{0},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
for _, sl := range tc.slots {
|
||||
t.Run(fmt.Sprintf("%s at slot %d, ", tc.name, sl), func(t *testing.T) {
|
||||
result := tc.span.At(sl)
|
||||
require.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncEpochOffset tests the syncEpochOffset helper function.
|
||||
func TestSyncEpochOffset(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
subtract primitives.Epoch
|
||||
expected primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "typical offset - 5 epochs back",
|
||||
current: primitives.Slot(10000),
|
||||
subtract: 5,
|
||||
expected: primitives.Slot(10000 - 5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "zero subtract returns current",
|
||||
current: primitives.Slot(5000),
|
||||
subtract: 0,
|
||||
expected: primitives.Slot(5000),
|
||||
},
|
||||
{
|
||||
name: "subtract 1 epoch from mid-range slot",
|
||||
current: primitives.Slot(1000),
|
||||
subtract: 1,
|
||||
expected: primitives.Slot(1000 - slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "offset equals current - underflow protection",
|
||||
current: primitives.Slot(slotsPerEpoch),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "offset exceeds current - underflow protection",
|
||||
current: primitives.Slot(50),
|
||||
subtract: 1000,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "current very close to 0",
|
||||
current: primitives.Slot(10),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "subtract MaxSafeEpoch",
|
||||
current: primitives.Slot(1000000),
|
||||
subtract: slots.MaxSafeEpoch(),
|
||||
expected: 1, // underflow protection
|
||||
},
|
||||
{
|
||||
name: "result exactly at slot 1",
|
||||
current: primitives.Slot(1 + slotsPerEpoch),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := syncEpochOffset(tc.current, tc.subtract)
|
||||
require.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsInitialize tests the syncNeeds.initialize() method.
|
||||
func TestSyncNeedsInitialize(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
|
||||
cases := []struct {
|
||||
invalidOldestFlag bool
|
||||
expectValidOldest bool
|
||||
oldestSlotFlagPtr *primitives.Slot
|
||||
blobRetentionFlag primitives.Epoch
|
||||
expectedBlob primitives.Epoch
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
current func() primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
blobRetentionFlag: 0,
|
||||
},
|
||||
{
|
||||
name: "blob retention flag less than spec minimum",
|
||||
blobRetentionFlag: minBlobEpochs - 1,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "blob retention flag greater than spec minimum",
|
||||
blobRetentionFlag: minBlobEpochs + 10,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs + 10,
|
||||
expectedCol: minBlobEpochs + 10,
|
||||
},
|
||||
{
|
||||
name: "oldestSlotFlagPtr is nil",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: nil,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
// Make it way past the spec minimum
|
||||
slot := currentSlot - primitives.Slot(params.BeaconConfig().MinEpochsForBlockRequests-1)*slotsPerEpoch
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
invalidOldestFlag: true,
|
||||
},
|
||||
{
|
||||
name: "oldestSlotFlagPtr at boundary (exactly at spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := currentSlot - primitives.Slot(params.BeaconConfig().MinEpochsForBlockRequests)*slotsPerEpoch
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
invalidOldestFlag: true,
|
||||
},
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs + 5,
|
||||
expectedCol: minBlobEpochs + 5,
|
||||
},
|
||||
{
|
||||
name: "zero blob retention uses spec minimum",
|
||||
blobRetentionFlag: 0,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "large blob retention value",
|
||||
blobRetentionFlag: 5000,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
{
|
||||
name: "regression for deneb start",
|
||||
blobRetentionFlag: 8212500,
|
||||
expectValidOldest: true,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
expectedBlob: 8212500,
|
||||
expectedCol: 8212500,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.current == nil {
|
||||
tc.current = currentFunc
|
||||
}
|
||||
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
if tc.invalidOldestFlag {
|
||||
require.IsNil(t, result.validOldestSlotPtr)
|
||||
} else {
|
||||
require.Equal(t, tc.oldestSlotFlagPtr, result.validOldestSlotPtr)
|
||||
}
|
||||
|
||||
// Check blockRetention is always spec minimum
|
||||
require.Equal(t, primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests), result.blockRetention)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsBlockSpan tests the syncNeeds.blockSpan() method.
|
||||
func TestSyncNeedsBlockSpan(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
minBlockEpochs := params.BeaconConfig().MinEpochsForBlockRequests
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
validOldest *primitives.Slot
|
||||
blockRetention primitives.Epoch
|
||||
current primitives.Slot
|
||||
expectedBegin primitives.Slot
|
||||
expectedEnd primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "with validOldestSlotPtr set",
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(500); return &s }(),
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 10000,
|
||||
expectedBegin: 500,
|
||||
expectedEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "without validOldestSlotPtr (nil)",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 10000,
|
||||
expectedBegin: syncEpochOffset(10000, primitives.Epoch(minBlockEpochs)),
|
||||
expectedEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "very low current slot",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 100,
|
||||
expectedBegin: 1, // underflow protection
|
||||
expectedEnd: 100,
|
||||
},
|
||||
{
|
||||
name: "very high current slot",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 1000000,
|
||||
expectedBegin: syncEpochOffset(1000000, primitives.Epoch(minBlockEpochs)),
|
||||
expectedEnd: 1000000,
|
||||
},
|
||||
{
|
||||
name: "validOldestSlotPtr at boundary value",
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(1); return &s }(),
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 5000,
|
||||
expectedBegin: 1,
|
||||
expectedEnd: 5000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
validOldestSlotPtr: tc.validOldest,
|
||||
blockRetention: tc.blockRetention,
|
||||
}
|
||||
result := sn.blockSpan(tc.current)
|
||||
require.Equal(t, tc.expectedBegin, result.Begin)
|
||||
require.Equal(t, tc.expectedEnd, result.End)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsCurrently tests the syncNeeds.currently() method.
|
||||
func TestSyncNeedsCurrently(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
denebSlot := primitives.Slot(1000)
|
||||
fuluSlot := primitives.Slot(2000)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
blobRetention primitives.Epoch
|
||||
colRetention primitives.Epoch
|
||||
blockRetention primitives.Epoch
|
||||
validOldest *primitives.Slot
|
||||
// Expected block span
|
||||
expectBlockBegin primitives.Slot
|
||||
expectBlockEnd primitives.Slot
|
||||
// Expected blob span
|
||||
expectBlobBegin primitives.Slot
|
||||
expectBlobEnd primitives.Slot
|
||||
// Expected column span
|
||||
expectColBegin primitives.Slot
|
||||
expectColEnd primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre-Deneb - only blocks needed",
|
||||
current: 500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(500, 5),
|
||||
expectBlockEnd: 500,
|
||||
expectBlobBegin: denebSlot, // adjusted to deneb
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // adjusted to fulu
|
||||
expectColEnd: 500,
|
||||
},
|
||||
{
|
||||
name: "between Deneb and Fulu - blocks and blobs needed",
|
||||
current: 1500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(1500, 5),
|
||||
expectBlockEnd: 1500,
|
||||
expectBlobBegin: max(syncEpochOffset(1500, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // adjusted to fulu
|
||||
expectColEnd: 1500,
|
||||
},
|
||||
{
|
||||
name: "post-Fulu - all resources needed",
|
||||
current: 3000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(3000, 5),
|
||||
expectBlockEnd: 3000,
|
||||
expectBlobBegin: max(syncEpochOffset(3000, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(3000, 10), fuluSlot),
|
||||
expectColEnd: 3000,
|
||||
},
|
||||
{
|
||||
name: "exactly at Deneb boundary",
|
||||
current: denebSlot,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot, 5),
|
||||
expectBlockEnd: denebSlot,
|
||||
expectBlobBegin: denebSlot,
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot,
|
||||
},
|
||||
{
|
||||
name: "exactly at Fulu boundary",
|
||||
current: fuluSlot,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot, 5),
|
||||
expectBlockEnd: fuluSlot,
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: fuluSlot,
|
||||
},
|
||||
{
|
||||
name: "small retention periods",
|
||||
current: 5000,
|
||||
blobRetention: 1,
|
||||
colRetention: 2,
|
||||
blockRetention: 1,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(5000, 1),
|
||||
expectBlockEnd: 5000,
|
||||
expectBlobBegin: max(syncEpochOffset(5000, 1), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(5000, 2), fuluSlot),
|
||||
expectColEnd: 5000,
|
||||
},
|
||||
{
|
||||
name: "large retention periods",
|
||||
current: 10000,
|
||||
blobRetention: 100,
|
||||
colRetention: 100,
|
||||
blockRetention: 50,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(10000, 50),
|
||||
expectBlockEnd: 10000,
|
||||
expectBlobBegin: max(syncEpochOffset(10000, 100), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(10000, 100), fuluSlot),
|
||||
expectColEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "with validOldestSlotPtr for blocks",
|
||||
current: 8000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(100); return &s }(),
|
||||
expectBlockBegin: 100,
|
||||
expectBlockEnd: 8000,
|
||||
expectBlobBegin: max(syncEpochOffset(8000, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(8000, 10), fuluSlot),
|
||||
expectColEnd: 8000,
|
||||
},
|
||||
{
|
||||
name: "retention approaching current slot",
|
||||
current: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
blockRetention: 3,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 3),
|
||||
expectBlockEnd: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
expectBlobBegin: max(syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 5), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 5), fuluSlot),
|
||||
expectColEnd: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "current just after Deneb",
|
||||
current: denebSlot + 10,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot+10, 5),
|
||||
expectBlockEnd: denebSlot + 10,
|
||||
expectBlobBegin: denebSlot,
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot + 10,
|
||||
},
|
||||
{
|
||||
name: "current just after Fulu",
|
||||
current: fuluSlot + 10,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot+10, 5),
|
||||
expectBlockEnd: fuluSlot + 10,
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot+10, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: fuluSlot + 10,
|
||||
},
|
||||
{
|
||||
name: "blob retention would start before Deneb",
|
||||
current: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
blobRetention: 100, // very large retention
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot+primitives.Slot(5*slotsPerEpoch), 5),
|
||||
expectBlockEnd: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
expectBlobBegin: denebSlot, // clamped to deneb
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "column retention would start before Fulu",
|
||||
current: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
blobRetention: 10,
|
||||
colRetention: 100, // very large retention
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot+primitives.Slot(5*slotsPerEpoch), 5),
|
||||
expectBlockEnd: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot+primitives.Slot(5*slotsPerEpoch), 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // clamped to fulu
|
||||
expectColEnd: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
current: func() primitives.Slot { return tc.current },
|
||||
deneb: denebSlot,
|
||||
fulu: fuluSlot,
|
||||
validOldestSlotPtr: tc.validOldest,
|
||||
blockRetention: tc.blockRetention,
|
||||
blobRetention: tc.blobRetention,
|
||||
colRetention: tc.colRetention,
|
||||
}
|
||||
|
||||
result := sn.Currently()
|
||||
|
||||
// Verify block span
|
||||
require.Equal(t, tc.expectBlockBegin, result.Block.Begin,
|
||||
"block.begin mismatch")
|
||||
require.Equal(t, tc.expectBlockEnd, result.Block.End,
|
||||
"block.end mismatch")
|
||||
|
||||
// Verify blob span
|
||||
require.Equal(t, tc.expectBlobBegin, result.Blob.Begin,
|
||||
"blob.begin mismatch")
|
||||
require.Equal(t, tc.expectBlobEnd, result.Blob.End,
|
||||
"blob.end mismatch")
|
||||
|
||||
// Verify column span
|
||||
require.Equal(t, tc.expectColBegin, result.Col.Begin,
|
||||
"col.begin mismatch")
|
||||
require.Equal(t, tc.expectColEnd, result.Col.End,
|
||||
"col.end mismatch")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCurrentNeedsIntegration verifies the complete currentNeeds workflow.
|
||||
func TestCurrentNeedsIntegration(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
denebSlot := primitives.Slot(1000)
|
||||
fuluSlot := primitives.Slot(2000)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
blobRetention primitives.Epoch
|
||||
colRetention primitives.Epoch
|
||||
testSlots []primitives.Slot
|
||||
expectBlockAt []bool
|
||||
expectBlobAt []bool
|
||||
expectColAt []bool
|
||||
}{
|
||||
{
|
||||
name: "pre-Deneb slot - only blocks",
|
||||
current: 500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{100, 250, 499, 500, 1000, 2000},
|
||||
expectBlockAt: []bool{true, true, true, false, false, false},
|
||||
expectBlobAt: []bool{false, false, false, false, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "between Deneb and Fulu - blocks and blobs",
|
||||
current: 1500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{500, 1000, 1200, 1499, 1500, 2000},
|
||||
expectBlockAt: []bool{true, true, true, true, false, false},
|
||||
expectBlobAt: []bool{false, false, true, true, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "post-Fulu - all resources",
|
||||
current: 3000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{1000, 1500, 2000, 2500, 2999, 3000},
|
||||
expectBlockAt: []bool{true, true, true, true, true, false},
|
||||
expectBlobAt: []bool{false, false, false, false, false, false},
|
||||
expectColAt: []bool{false, false, false, false, true, false},
|
||||
},
|
||||
{
|
||||
name: "at Deneb boundary",
|
||||
current: denebSlot,
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
testSlots: []primitives.Slot{500, 999, 1000, 1500, 2000},
|
||||
expectBlockAt: []bool{true, true, false, false, false},
|
||||
expectBlobAt: []bool{false, false, true, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "at Fulu boundary",
|
||||
current: fuluSlot,
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
testSlots: []primitives.Slot{1000, 1500, 1999, 2000, 2001},
|
||||
expectBlockAt: []bool{true, true, true, false, false},
|
||||
expectBlobAt: []bool{false, false, true, false, false},
|
||||
expectColAt: []bool{false, false, false, false, false},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
current: func() primitives.Slot { return tc.current },
|
||||
deneb: denebSlot,
|
||||
fulu: fuluSlot,
|
||||
blockRetention: 100,
|
||||
blobRetention: tc.blobRetention,
|
||||
colRetention: tc.colRetention,
|
||||
}
|
||||
|
||||
cn := sn.Currently()
|
||||
|
||||
// Verify block.end == current
|
||||
require.Equal(t, tc.current, cn.Block.End, "block.end should equal current")
|
||||
|
||||
// Verify blob.end == fulu
|
||||
require.Equal(t, fuluSlot, cn.Blob.End, "blob.end should equal fulu")
|
||||
|
||||
// Verify col.end == current
|
||||
require.Equal(t, tc.current, cn.Col.End, "col.end should equal current")
|
||||
|
||||
// Test each slot
|
||||
for i, slot := range tc.testSlots {
|
||||
require.Equal(t, tc.expectBlockAt[i], cn.Block.At(slot),
|
||||
"block.at(%d) mismatch at index %d", slot, i)
|
||||
require.Equal(t, tc.expectBlobAt[i], cn.Blob.At(slot),
|
||||
"blob.at(%d) mismatch at index %d", slot, i)
|
||||
require.Equal(t, tc.expectColAt[i], cn.Col.At(slot),
|
||||
"col.at(%d) mismatch at index %d", slot, i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -38,6 +38,7 @@ go_library(
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

@@ -25,6 +25,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
	"github.com/spf13/afero"
	"golang.org/x/sync/errgroup"
)

const (
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
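As a side note, a minimal sketch of how the new per-epoch listing could be exercised against an in-memory afero filesystem. It is an illustration only, not part of the diff; the test name and file names are made up, and it assumes the .sszs extension described in the comments above and that the helper is package-internal.

// Sketch only: listEpochFiles should return only files with the data column extension.
func TestListEpochFilesSketch(t *testing.T) {
	fs := afero.NewMemMapFs()
	// Layout assumed by WarmCache: <period>/<epoch>/<name>.sszs
	require.NoError(t, fs.MkdirAll("0/5", 0o755))
	require.NoError(t, afero.WriteFile(fs, "0/5/0xabc.sszs", []byte{}, 0o644))
	require.NoError(t, afero.WriteFile(fs, "0/5/notes.txt", []byte{}, 0o644)) // ignored: wrong extension

	files, err := listEpochFiles(fs, "0/5")
	require.NoError(t, err)
	require.Equal(t, 1, len(files))
}
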
@@ -270,7 +360,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
	// Check the number of columns is the one expected.
	// While implementing this, we expect the number of columns won't change.
	// If it does, we will need to create a new version of the data column sidecar file.
	if params.BeaconConfig().NumberOfColumns != mandatoryNumberOfColumns {
	if fieldparams.NumberOfColumns != mandatoryNumberOfColumns {
		return errWrongNumberOfColumns
	}

@@ -515,6 +605,11 @@ func (dcs *DataColumnStorage) Clear() error {

// prune cleans the cache, the filesystem and mutexes.
func (dcs *DataColumnStorage) prune() {
	startTime := time.Now()
	defer func() {
		dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	}()

	highestStoredEpoch := dcs.cache.HighestEpoch()

	// Check if we need to prune.
@@ -622,6 +717,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
// Create the SSZ encoded data column sidecars.
|
||||
var sszEncodedDataColumnSidecars []byte
|
||||
|
||||
// Initialize the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount := uint8(0)
|
||||
|
||||
for {
|
||||
dataColumnSidecars := pullChan(inputDataColumnSidecars)
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
@@ -668,6 +766,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errors.Wrap(err, "set index")
|
||||
}
|
||||
|
||||
// Increment the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount++
|
||||
|
||||
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
@@ -692,9 +793,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -808,10 +912,14 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -964,8 +1072,7 @@ func (si *storageIndices) set(dataColumnIndex uint64, position uint8) error {
|
||||
|
||||
// pullChan pulls data column sidecars from the input channel until it is empty.
|
||||
func pullChan(inputRoDataColumns chan []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns)
|
||||
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, fieldparams.NumberOfColumns)
|
||||
|
||||
for {
|
||||
select {
|
||||
|
||||
@@ -117,8 +117,6 @@ func (sc *dataColumnStorageSummaryCache) HighestEpoch() primitives.Epoch {
|
||||
|
||||
// set updates the cache.
|
||||
func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent) error {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
@@ -127,7 +125,7 @@ func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent)
|
||||
|
||||
count := uint64(0)
|
||||
for _, index := range dataColumnsIdent.Indices {
|
||||
if index >= numberOfColumns {
|
||||
if index >= fieldparams.NumberOfColumns {
|
||||
return errDataColumnIndexOutOfBounds
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -88,22 +87,6 @@ func TestWarmCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("wrong numbers of columns", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongNumberOfColumns)
|
||||
})
|
||||
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
|
||||
@@ -36,16 +36,15 @@ var (
|
||||
})
|
||||
|
||||
// Data columns
|
||||
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
|
||||
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_save_latency",
|
||||
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
|
||||
})
|
||||
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_get_latency",
|
||||
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
Buckets: []float64{3, 5, 7, 9, 11, 13},
|
||||
})
|
||||
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "data_column_pruned",
|
||||
@@ -59,4 +58,16 @@ var (
|
||||
Name: "data_column_disk_count",
|
||||
Help: "Approximate number of data columns in storage",
|
||||
})
|
||||
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_file_sync_latency",
|
||||
Help: "Latency of sync operations when saving data columns in milliseconds",
|
||||
})
|
||||
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_batch_store_count",
|
||||
Help: "Number of data columns stored in a batch",
|
||||
})
|
||||
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_prune_latency",
|
||||
Help: "Latency of data column prune operations in milliseconds",
|
||||
})
|
||||
)
|
||||
|
||||
@@ -128,9 +128,9 @@ type NoHeadAccessDatabase interface {
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
|
||||
// P2P Metadata operations.
|
||||
SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
|
||||
|
||||
@@ -146,9 +146,9 @@ func (s *Store) UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailab
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
// UpdateSubscribedToAllDataSubnets updates whether the node is subscribed to all data subnets (supernode mode).
|
||||
// This is a one-way flag - once set to true, it cannot be reverted to false.
|
||||
// Returns the previous state.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
@@ -156,13 +156,11 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
@@ -181,7 +179,6 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
|
||||
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
return subscribed
|
||||
}
|
||||
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -274,6 +275,17 @@ func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
require.Equal(t, false, stored)
|
||||
})
|
||||
|
||||
t.Run("initial update with empty database - set to true", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -288,7 +300,7 @@ func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
t.Run("update from true to true (no change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
|
||||
@@ -532,12 +532,19 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
|
||||
defer span.End()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV2) {
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
|
||||
|
||||
if len(result) != 0 {
|
||||
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}
|
||||
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -2683,7 +2683,7 @@ func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
for range fieldparams.NumberOfColumns {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
|
||||
@@ -27,6 +27,13 @@ var (
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
getBlobsV2Latency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_blobs_v2_latency_milliseconds",
|
||||
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "execution_parse_error_count",
|
||||
Help: "The number of errors that occurred while parsing execution payload",
|
||||
|
||||
@@ -240,7 +240,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
if node.slot == epochStart {
|
||||
return true, nil
|
||||
}
|
||||
if !features.Get().DisableLastEpochTargets {
|
||||
if !features.Get().IgnoreUnviableAttestations {
|
||||
// Allow any node from the checkpoint epoch - 1 to be viable.
|
||||
nodeEpoch := slots.ToEpoch(node.slot)
|
||||
if nodeEpoch+1 == cp.Epoch {
|
||||
@@ -642,8 +642,12 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
if slots.ToEpoch(node.slot) >= epoch && node.parent != nil {
|
||||
node = node.parent
|
||||
if slots.ToEpoch(node.slot) >= epoch {
|
||||
if node.parent != nil {
|
||||
node = node.parent
|
||||
} else {
|
||||
return f.store.finalizedDependentRoot, nil
|
||||
}
|
||||
}
|
||||
return node.root, nil
|
||||
}
|
||||
|
||||
@@ -212,6 +212,9 @@ func (s *Store) prune(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save the new finalized dependent root because it will be pruned
|
||||
s.finalizedDependentRoot = finalizedNode.parent.root
|
||||
|
||||
// Prune nodeByRoot starting from root
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
|
||||
return err
|
||||
|
||||
@@ -465,6 +465,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
|
||||
// Insert a block in slot 32
|
||||
state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
@@ -475,6 +476,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dependent, [32]byte{})
|
||||
|
||||
// Insert a block in slot 33
|
||||
state, blk1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blk.Root(), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk1))
|
||||
@@ -488,7 +490,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dependent, [32]byte{})
|
||||
|
||||
// Insert a block for the next epoch (missed slot 0)
|
||||
// Insert a block for the next epoch (missed slot 0), slot 65
|
||||
|
||||
state, blk2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -509,6 +511,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dependent, blk1.Root())
|
||||
|
||||
// Insert a block at slot 66
|
||||
state, blk3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, blk2.Root(), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk3))
|
||||
@@ -533,8 +536,11 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
dependent, err = f.DependentRoot(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, dependent)
|
||||
dependent, err = f.DependentRoot(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk1.Root(), dependent)
|
||||
|
||||
// Insert a block for next epoch (slot 0 present)
|
||||
// Insert a block for the next epoch, slot 96 (descends from finalized at slot 33)
|
||||
state, blk4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk4))
|
||||
@@ -551,6 +557,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dependent, blk1.Root())
|
||||
|
||||
// Insert a block at slot 97
|
||||
state, blk5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, blk4.Root(), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk5))
|
||||
@@ -600,12 +607,16 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, blk1.Root())
|
||||
|
||||
// Prune finalization
|
||||
// Prune finalization, finalize the block at slot 96
|
||||
s.finalizedCheckpoint.Root = blk4.Root()
|
||||
require.NoError(t, s.prune(ctx))
|
||||
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk4.Root(), target)
|
||||
// Dependent root for the finalized block should be the root of the pruned block at slot 33
|
||||
dependent, err = f.DependentRootForEpoch(blk4.Root(), 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk1.Root(), dependent)
|
||||
}
|
||||
|
||||
func TestStore_DependentRootForEpoch(t *testing.T) {
|
||||
|
||||
@@ -31,6 +31,7 @@ type Store struct {
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
|
||||
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
|
||||
95
beacon-chain/graffiti/graffiti-proposal-brief.md
Normal file
95
beacon-chain/graffiti/graffiti-proposal-brief.md
Normal file
@@ -0,0 +1,95 @@
# Graffiti Version Info Implementation

## Summary
Add automatic EL+CL version info to block graffiti following [ethereum/execution-apis#517](https://github.com/ethereum/execution-apis/pull/517). Uses the [flexible standard](https://hackmd.io/@wmoBhF17RAOH2NZ5bNXJVg/BJX2c9gja) to pack client info into leftover space after user graffiti.

More details: https://github.com/ethereum/execution-apis/blob/main/src/engine/identification.md

## Implementation

### Core Component: GraffitiInfo Struct
Thread-safe struct holding version information:
```go
const clCode = "PR"

type GraffitiInfo struct {
	mu           sync.RWMutex
	userGraffiti string // From --graffiti flag (set once at startup)
	clCommit     string // From version.GetCommitPrefix() helper function
	elCode       string // From engine_getClientVersionV1
	elCommit     string // From engine_getClientVersionV1
}
```

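The accessor methods are left implicit here; a minimal sketch, assuming the `UpdateFromEngine(code, commit string)` call used later in `updateGraffitiInfo` and a hypothetical `snapshot` helper for reads under the lock:

```go
// UpdateFromEngine stores the latest EL identification under the write lock.
func (g *GraffitiInfo) UpdateFromEngine(code, commit string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.elCode = code
	g.elCommit = commit
}

// snapshot copies all fields under the read lock so GenerateGraffiti
// works on a consistent view without holding the lock while formatting.
func (g *GraffitiInfo) snapshot() (user, clCommit, elCode, elCommit string) {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return g.userGraffiti, g.clCommit, g.elCode, g.elCommit
}
```
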
### Flow
1. **Startup**: Parse flags, create GraffitiInfo with user graffiti and CL info.
2. **Wiring**: Pass the struct to both the execution service and the RPC validator server.
3. **Runtime**: An execution service goroutine periodically calls `engine_getClientVersionV1` and updates the EL fields.
4. **Block Proposal**: The RPC validator server calls `GenerateGraffiti()` to get formatted graffiti.

### Flexible Graffiti Format
Packs as much client info as space allows (after user graffiti):

| Available Space | Format | Example |
|----------------|--------|---------|
| ≥12 bytes | `EL(2)+commit(4)+CL(2)+commit(4)+user` | `GE168dPR63afBob` |
| 8-11 bytes | `EL(2)+commit(2)+CL(2)+commit(2)+user` | `GE16PR63my node here` |
| 4-7 bytes | `EL(2)+CL(2)+user` | `GEPRthis is my graffiti msg` |
| 2-3 bytes | `EL(2)+user` | `GEalmost full graffiti message` |
| <2 bytes | user only | `full 32 byte user graffiti here` |

```go
func (g *GraffitiInfo) GenerateGraffiti() [32]byte {
	available := 32 - len(userGraffiti)

	if elCode == "" {
		elCommit2 = elCommit4 = ""
	}

	switch {
	case available >= 12:
		return elCode + elCommit4 + clCode + clCommit4 + userGraffiti
	case available >= 8:
		return elCode + elCommit2 + clCode + clCommit2 + userGraffiti
	case available >= 4:
		return elCode + clCode + userGraffiti
	case available >= 2:
		return elCode + userGraffiti
	default:
		return userGraffiti
	}
}
```

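The block above is a design sketch rather than compilable Go (it returns concatenated strings from a function declared to return `[32]byte`). A minimal runnable version of the same packing rules, assuming the `snapshot` helper sketched earlier and a hypothetical `prefix` helper for the 2- and 4-character commit prefixes:

```go
func (g *GraffitiInfo) GenerateGraffiti() [32]byte {
	user, clCommit, elCode, elCommit := g.snapshot()
	available := 32 - len(user)

	clCommit2, clCommit4 := prefix(clCommit, 2), prefix(clCommit, 4)
	elCommit2, elCommit4 := prefix(elCommit, 2), prefix(elCommit, 4)
	if elCode == "" {
		elCommit2, elCommit4 = "", ""
	}

	var s string
	switch {
	case available >= 12:
		s = elCode + elCommit4 + clCode + clCommit4 + user
	case available >= 8:
		s = elCode + elCommit2 + clCode + clCommit2 + user
	case available >= 4:
		s = elCode + clCode + user
	case available >= 2:
		s = elCode + user
	default:
		s = user
	}

	var out [32]byte
	copy(out[:], s) // graffiti is zero-padded to 32 bytes; copy truncates if s is longer
	return out
}

// prefix returns at most n leading characters of s.
func prefix(s string, n int) string {
	if len(s) < n {
		return s
	}
	return s[:n]
}
```
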
### Update Logic
Single testable function in the execution service:
```go
func (s *Service) updateGraffitiInfo() {
	versions, err := s.GetClientVersion(ctx)
	if err != nil {
		return // Keep last good value
	}
	if len(versions) == 1 {
		s.graffitiInfo.UpdateFromEngine(versions[0].Code, versions[0].Commit)
	}
}
```

A goroutine calls this on `slot % 8 == 4` timing (4 times per epoch, avoids slot boundaries).

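A rough sketch of that goroutine, assuming a hypothetical `slotTicker` channel that delivers the current slot once per slot (the channel and the `primitives.Slot` parameter are illustrative, not specified by this design):

```go
func (s *Service) graffitiUpdateLoop(ctx context.Context, slotTicker <-chan primitives.Slot) {
	for {
		select {
		case <-ctx.Done():
			return
		case slot := <-slotTicker:
			// Refresh EL client info 4 times per epoch, away from slot boundaries.
			if slot%8 == 4 {
				s.updateGraffitiInfo()
			}
		}
	}
}
```
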
### File Changes Required

**New:**
- `beacon-chain/execution/graffiti_info.go` - The struct and methods
- `beacon-chain/execution/graffiti_info_test.go` - Unit tests
- `runtime/version/version.go` - Add `GetCommitPrefix()` helper that extracts the first 4 hex chars from the git commit injected via Bazel ldflags at build time

**Modified:**
- `beacon-chain/execution/service.go` - Add goroutine + updateGraffitiInfo()
- `beacon-chain/execution/engine_client.go` - Add GetClientVersion() method that makes the engine call
- `beacon-chain/rpc/.../validator/proposer.go` - Call GenerateGraffiti()
- `beacon-chain/node/node.go` - Wire GraffitiInfo to services

### Testing Strategy
- Unit test GraffitiInfo methods (priority logic, thread safety) - see the sketch below
- Unit test updateGraffitiInfo() with mocked engine client
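For the first bullet, a hedged example of what such a test might look like, reusing the table's `GE168dPR63afBob` example and a hypothetical `NewGraffitiInfo(userGraffiti, clCommit string)` constructor:

```go
func TestGenerateGraffiti_Priorities(t *testing.T) {
	g := NewGraffitiInfo("Bob", "63af")
	g.UpdateFromEngine("GE", "168d")

	got := g.GenerateGraffiti()
	// With only 3 bytes of user graffiti there is room for the full
	// EL(2)+commit(4)+CL(2)+commit(4) prefix.
	want := "GE168dPR63afBob"
	if string(got[:len(want)]) != want {
		t.Fatalf("got %q, want prefix %q", got[:len(want)], want)
	}
}
```
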
@@ -23,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
|
||||
@@ -116,7 +117,7 @@ type BeaconNode struct {
|
||||
GenesisProviders []genesis.Provider
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
@@ -129,6 +130,7 @@ type BeaconNode struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
ConfigOptions []params.Option
|
||||
SyncNeedsWaiter func() (das.SyncNeeds, error)
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -193,7 +195,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
params.LogDigests(params.BeaconConfig())
|
||||
|
||||
synchronizer := startup.NewClockSynchronizer()
|
||||
beacon.clockWaiter = synchronizer
|
||||
beacon.ClockWaiter = synchronizer
|
||||
beacon.forkChoicer = doublylinkedtree.New()
|
||||
|
||||
depositAddress, err := execution.DepositContractAddress()
|
||||
@@ -233,12 +235,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
|
||||
beacon.lhsp = &verification.LazyHeadStateProvider{}
|
||||
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
|
||||
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen, beacon.lhsp)
|
||||
beacon.ClockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen, beacon.lhsp)
|
||||
|
||||
beacon.BackfillOpts = append(
|
||||
beacon.BackfillOpts,
|
||||
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
|
||||
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
|
||||
backfill.WithSyncNeedsWaiter(beacon.SyncNeedsWaiter),
|
||||
)
|
||||
|
||||
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
|
||||
@@ -664,7 +667,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
StateGen: b.stateGen,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -706,7 +709,7 @@ func (b *BeaconNode) registerSlashingPoolService() error {
|
||||
return err
|
||||
}
|
||||
|
||||
s := slashings.NewPoolService(b.ctx, b.slashingsPool, slashings.WithElectraTimer(b.clockWaiter, chainService.CurrentSlot))
|
||||
s := slashings.NewPoolService(b.ctx, b.slashingsPool, slashings.WithElectraTimer(b.ClockWaiter, chainService.CurrentSlot))
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
@@ -828,7 +831,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
regularsync.WithReconstructor(web3Service),
|
||||
regularsync.WithClockWaiter(b.clockWaiter),
|
||||
regularsync.WithClockWaiter(b.ClockWaiter),
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
@@ -859,7 +862,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
P2P: b.fetchP2P(),
|
||||
StateNotifier: b,
|
||||
BlockNotifier: b,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
SyncNeedsWaiter: b.SyncNeedsWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
@@ -890,7 +894,7 @@ func (b *BeaconNode) registerSlasherService() error {
|
||||
SlashingPoolInserter: b.slashingsPool,
|
||||
SyncChecker: syncService,
|
||||
HeadStateFetcher: chainService,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -983,7 +987,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
MaxMsgSize: maxMsgSize,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
@@ -1128,7 +1132,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.ClockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
}
|
||||
|
||||
@@ -47,6 +47,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
|
||||
@@ -4,11 +4,18 @@ import (
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// StatusProvider describes the minimum capability that Assigner needs from peer status tracking.
|
||||
// That is, the ability to retrieve the best peers by finalized checkpoint.
|
||||
type StatusProvider interface {
|
||||
BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID)
|
||||
}
|
||||
|
||||
// FinalizedCheckpointer describes the minimum capability that Assigner needs from forkchoice.
|
||||
// That is, the ability to retrieve the latest finalized checkpoint to help with peer evaluation.
|
||||
type FinalizedCheckpointer interface {
|
||||
@@ -17,9 +24,9 @@ type FinalizedCheckpointer interface {
|
||||
|
||||
// NewAssigner assists in the correct construction of an Assigner by code in other packages,
|
||||
// assuring all the important private member fields are given values.
|
||||
// The FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
|
||||
// The StatusProvider is used to retrieve best peers, and FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
|
||||
// Peers that report an older finalized checkpoint are filtered out.
|
||||
func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
|
||||
func NewAssigner(s StatusProvider, fc FinalizedCheckpointer) *Assigner {
|
||||
return &Assigner{
|
||||
ps: s,
|
||||
fc: fc,
|
||||
@@ -28,7 +35,7 @@ func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
|
||||
|
||||
// Assigner uses the "BestFinalized" peer scoring method to pick the next-best peer to receive rpc requests.
|
||||
type Assigner struct {
|
||||
ps *Status
|
||||
ps StatusProvider
|
||||
fc FinalizedCheckpointer
|
||||
}
|
||||
|
||||
@@ -38,38 +45,42 @@ type Assigner struct {
|
||||
var ErrInsufficientSuitable = errors.New("no suitable peers")
|
||||
|
||||
func (a *Assigner) freshPeers() ([]peer.ID, error) {
|
||||
required := min(flags.Get().MinimumSyncPeers, params.BeaconConfig().MaxPeersToSync)
|
||||
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
|
||||
required := min(flags.Get().MinimumSyncPeers, min(flags.Get().MinimumSyncPeers, params.BeaconConfig().MaxPeersToSync))
|
||||
_, peers := a.ps.BestFinalized(a.fc.FinalizedCheckpoint().Epoch)
|
||||
if len(peers) < required {
|
||||
log.WithFields(logrus.Fields{
|
||||
"suitable": len(peers),
|
||||
"required": required}).Warn("Unable to assign peer while suitable peers < required ")
|
||||
"required": required}).Trace("Unable to assign peer while suitable peers < required")
|
||||
return nil, ErrInsufficientSuitable
|
||||
}
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// AssignmentFilter describes a function that takes a list of peer.IDs and returns a filtered subset.
|
||||
// An example is the NotBusy filter.
|
||||
type AssignmentFilter func([]peer.ID) []peer.ID
|
||||
|
||||
// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
|
||||
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
|
||||
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
|
||||
// the number of outbound requests to each peer from a given component.
|
||||
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
|
||||
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
|
||||
best, err := a.freshPeers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pickBest(busy, n, best), nil
|
||||
return filter(best), nil
|
||||
}
|
||||
|
||||
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
|
||||
ps := make([]peer.ID, 0, n)
|
||||
for _, p := range best {
|
||||
if len(ps) == n {
|
||||
return ps
|
||||
}
|
||||
if !busy[p] {
|
||||
ps = append(ps, p)
|
||||
// NotBusy is a filter that returns the list of peer.IDs that are not in the `busy` map.
|
||||
func NotBusy(busy map[peer.ID]bool) AssignmentFilter {
|
||||
return func(peers []peer.ID) []peer.ID {
|
||||
ps := make([]peer.ID, 0, len(peers))
|
||||
for _, p := range peers {
|
||||
if !busy[p] {
|
||||
ps = append(ps, p)
|
||||
}
|
||||
}
|
||||
return ps
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
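Since `Assign` changed from `(busy, n)` to a single filter argument, a hedged migration sketch for a caller outside the `peers` package (variable names are illustrative, not from the diff):

```go
// Before: pids, err := assigner.Assign(busy, 5)
pids, err := assigner.Assign(peers.NotBusy(busy))
if err != nil {
	return err
}
// The count cap previously expressed by n is now the caller's responsibility.
if len(pids) > 5 {
	pids = pids[:5]
}
```
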
@@ -5,6 +5,8 @@ import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
@@ -14,82 +16,68 @@ func TestPickBest(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
busy map[peer.ID]bool
|
||||
n int
|
||||
best []peer.ID
|
||||
expected []peer.ID
|
||||
}{
|
||||
{
|
||||
name: "",
|
||||
n: 0,
|
||||
name: "don't limit",
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "none busy",
|
||||
n: 1,
|
||||
expected: best[0:1],
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "all busy except last",
|
||||
n: 1,
|
||||
busy: testBusyMap(best[0 : len(best)-1]),
|
||||
expected: best[len(best)-1:],
|
||||
},
|
||||
{
|
||||
name: "all busy except i=5",
|
||||
n: 1,
|
||||
busy: testBusyMap(slices.Concat(best[0:5], best[6:])),
|
||||
expected: []peer.ID{best[5]},
|
||||
},
|
||||
{
|
||||
name: "all busy - 0 results",
|
||||
n: 1,
|
||||
busy: testBusyMap(best),
|
||||
},
|
||||
{
|
||||
name: "first half busy",
|
||||
n: 5,
|
||||
busy: testBusyMap(best[0:5]),
|
||||
expected: best[5:],
|
||||
},
|
||||
{
|
||||
name: "back half busy",
|
||||
n: 5,
|
||||
busy: testBusyMap(best[5:]),
|
||||
expected: best[0:5],
|
||||
},
|
||||
{
|
||||
name: "pick all ",
|
||||
n: 10,
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "none available",
|
||||
n: 10,
|
||||
best: []peer.ID{},
|
||||
},
|
||||
{
|
||||
name: "not enough",
|
||||
n: 10,
|
||||
best: best[0:1],
|
||||
expected: best[0:1],
|
||||
},
|
||||
{
|
||||
name: "not enough, some busy",
|
||||
n: 10,
|
||||
best: best[0:6],
|
||||
busy: testBusyMap(best[0:5]),
|
||||
expected: best[5:6],
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
name := fmt.Sprintf("n=%d", c.n)
|
||||
if c.name != "" {
|
||||
name += " " + c.name
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
if c.best == nil {
|
||||
c.best = best
|
||||
}
|
||||
pb := pickBest(c.busy, c.n, c.best)
|
||||
filt := NotBusy(c.busy)
|
||||
pb := filt(c.best)
|
||||
require.Equal(t, len(c.expected), len(pb))
|
||||
for i := range c.expected {
|
||||
require.Equal(t, c.expected[i], pb[i])
|
||||
@@ -113,3 +101,310 @@ func testPeerIds(n int) []peer.ID {
|
||||
}
|
||||
return pids
|
||||
}
|
||||
|
||||
// MockStatus is a test mock for the Status interface used in Assigner.
|
||||
type MockStatus struct {
|
||||
bestFinalizedEpoch primitives.Epoch
|
||||
bestPeers []peer.ID
|
||||
}
|
||||
|
||||
func (m *MockStatus) BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
return m.bestFinalizedEpoch, m.bestPeers
|
||||
}
|
||||
|
||||
// MockFinalizedCheckpointer is a test mock for FinalizedCheckpointer interface.
|
||||
type MockFinalizedCheckpointer struct {
|
||||
checkpoint *forkchoicetypes.Checkpoint
|
||||
}
|
||||
|
||||
func (m *MockFinalizedCheckpointer) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
return m.checkpoint
|
||||
}
|
||||
|
||||
// TestAssign_HappyPath tests the Assign method with sufficient peers and various filters.
|
||||
func TestAssign_HappyPath(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
finalizedEpoch primitives.Epoch
|
||||
filter AssignmentFilter
|
||||
expectedCount int
|
||||
}{
|
||||
{
|
||||
name: "sufficient peers with identity filter",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 10,
|
||||
},
|
||||
{
|
||||
name: "sufficient peers with NotBusy filter (no busy)",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: NotBusy(make(map[peer.ID]bool)),
|
||||
expectedCount: 10,
|
||||
},
|
||||
{
|
||||
name: "sufficient peers with NotBusy filter (some busy)",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: NotBusy(testBusyMap(peers[0:5])),
|
||||
expectedCount: 5,
|
||||
},
|
||||
{
|
||||
name: "minimum threshold exactly met",
|
||||
bestPeers: peers[0:5],
|
||||
finalizedEpoch: 10,
|
||||
filter: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 5,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: tc.finalizedEpoch,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filter)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("expected %d peers, got %d", tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_InsufficientPeers tests error handling when not enough suitable peers are available.
|
||||
// Note: The actual peer threshold depends on config values MaxPeersToSync and MinimumSyncPeers.
|
||||
func TestAssign_InsufficientPeers(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
expectedErr error
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "exactly at minimum threshold",
|
||||
bestPeers: testPeerIds(5),
|
||||
expectedErr: nil,
|
||||
description: "5 peers should meet the minimum threshold",
|
||||
},
|
||||
{
|
||||
name: "well above minimum threshold",
|
||||
bestPeers: testPeerIds(50),
|
||||
expectedErr: nil,
|
||||
description: "50 peers should easily meet requirements",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(NotBusy(make(map[peer.ID]bool)))
|
||||
|
||||
if tc.expectedErr != nil {
|
||||
require.NotNil(t, err, tc.description)
|
||||
require.Equal(t, tc.expectedErr, err)
|
||||
} else {
|
||||
require.NoError(t, err, tc.description)
|
||||
require.Equal(t, len(tc.bestPeers), len(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_FilterApplication verifies that filters are correctly applied to peer lists.
|
||||
func TestAssign_FilterApplication(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
filterToApply AssignmentFilter
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "identity filter returns all peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 10,
|
||||
description: "identity filter should not change peer list",
|
||||
},
|
||||
{
|
||||
name: "filter removes all peers (all busy)",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers)),
|
||||
expectedCount: 0,
|
||||
description: "all peers busy should return empty list",
|
||||
},
|
||||
{
|
||||
name: "filter removes first 5 peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers[0:5])),
|
||||
expectedCount: 5,
|
||||
description: "should only return non-busy peers",
|
||||
},
|
||||
{
|
||||
name: "filter removes last 5 peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers[5:])),
|
||||
expectedCount: 5,
|
||||
description: "should only return non-busy peers from beginning",
|
||||
},
|
||||
{
|
||||
name: "custom filter selects every other peer",
|
||||
bestPeers: peers,
|
||||
filterToApply: func(p []peer.ID) []peer.ID {
|
||||
result := make([]peer.ID, 0)
|
||||
for i := 0; i < len(p); i += 2 {
|
||||
result = append(result, p[i])
|
||||
}
|
||||
return result
|
||||
},
|
||||
expectedCount: 5,
|
||||
description: "custom filter selecting every other peer",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filterToApply)
|
||||
|
||||
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_FinalizedCheckpointUsage verifies that the finalized checkpoint is correctly used.
|
||||
func TestAssign_FinalizedCheckpointUsage(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
finalizedEpoch primitives.Epoch
|
||||
bestPeers []peer.ID
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "epoch 0",
|
||||
finalizedEpoch: 0,
|
||||
bestPeers: peers,
|
||||
expectedCount: 10,
|
||||
description: "epoch 0 should work",
|
||||
},
|
||||
{
|
||||
name: "epoch 100",
|
||||
finalizedEpoch: 100,
|
||||
bestPeers: peers,
|
||||
expectedCount: 10,
|
||||
description: "high epoch number should work",
|
||||
},
|
||||
{
|
||||
name: "epoch changes between calls",
|
||||
finalizedEpoch: 50,
|
||||
bestPeers: testPeerIds(5),
|
||||
expectedCount: 5,
|
||||
description: "epoch value should be used in checkpoint",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: tc.finalizedEpoch,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(NotBusy(make(map[peer.ID]bool)))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_EdgeCases tests boundary conditions and edge cases.
|
||||
func TestAssign_EdgeCases(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
filter AssignmentFilter
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "filter returns empty from sufficient peers",
|
||||
bestPeers: testPeerIds(10),
|
||||
filter: func(p []peer.ID) []peer.ID { return []peer.ID{} },
|
||||
expectedCount: 0,
|
||||
description: "filter can return empty list even if sufficient peers available",
|
||||
},
|
||||
{
|
||||
name: "filter selects subset from sufficient peers",
|
||||
bestPeers: testPeerIds(10),
|
||||
filter: func(p []peer.ID) []peer.ID { return p[0:2] },
|
||||
expectedCount: 2,
|
||||
description: "filter can return subset of available peers",
|
||||
},
|
||||
{
|
||||
name: "filter selects single peer from many",
|
||||
bestPeers: testPeerIds(20),
|
||||
filter: func(p []peer.ID) []peer.ID { return p[0:1] },
|
||||
expectedCount: 1,
|
||||
description: "filter can select single peer from many available",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filter)
|
||||
|
||||
require.NoError(t, err, fmt.Sprintf("%s: unexpected error: %v", tc.description, err))
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -704,76 +704,54 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
// BestFinalized groups all peers by their last known finalized epoch
|
||||
// and selects the epoch of the largest group as best.
|
||||
// Any peer with a finalized epoch < ourFinalized is excluded from consideration.
|
||||
// In the event of a tie in largest group size, the higher epoch is the tie breaker.
|
||||
// The selected epoch is returned, along with a list of peers with a finalized epoch >= the selected epoch.
|
||||
func (p *Status) BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
connected := p.Connected()
|
||||
pids := make([]peer.ID, 0, len(connected))
|
||||
views := make(map[peer.ID]*pb.StatusV2, len(connected))
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
votes := make(map[primitives.Epoch]uint64)
|
||||
winner := primitives.Epoch(0)
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
view, err := p.ChainState(pid)
|
||||
if err != nil || view == nil || view.FinalizedEpoch < ourFinalized {
|
||||
continue
|
||||
}
|
||||
pids = append(pids, pid)
|
||||
views[pid] = view
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
targetEpoch = epoch
|
||||
votes[view.FinalizedEpoch]++
|
||||
if winner == 0 {
|
||||
winner = view.FinalizedEpoch
|
||||
continue
|
||||
}
|
||||
e, v := view.FinalizedEpoch, votes[view.FinalizedEpoch]
|
||||
if v > votes[winner] || v == votes[winner] && e > winner {
|
||||
winner = e
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
// Descending sort by (finalized, head).
|
||||
sort.Slice(pids, func(i, j int) bool {
|
||||
iv, jv := views[pids[i]], views[pids[j]]
|
||||
if iv.FinalizedEpoch == jv.FinalizedEpoch {
|
||||
return iv.HeadSlot > jv.HeadSlot
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
return iv.FinalizedEpoch > jv.FinalizedEpoch
|
||||
})
|
||||
|
||||
// Trim potential peers to those on or after target epoch.
|
||||
for i, pid := range potentialPIDs {
|
||||
if pidEpoch[pid] < targetEpoch {
|
||||
potentialPIDs = potentialPIDs[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
// Find the first peer with finalized epoch < winner, trim and all following (lower) peers.
|
||||
trim := sort.Search(len(pids), func(i int) bool {
|
||||
return views[pids[i]].FinalizedEpoch < winner
|
||||
})
|
||||
pids = pids[:trim]
|
||||
|
||||
// Trim potential peers to at most maxPeers.
|
||||
if len(potentialPIDs) > maxPeers {
|
||||
potentialPIDs = potentialPIDs[:maxPeers]
|
||||
}
|
||||
|
||||
return targetEpoch, potentialPIDs
|
||||
return winner, pids
|
||||
}
|
||||
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
|
||||
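The rewritten doc comment describes a plurality vote with a higher-epoch tie break. Reduced to its core and separated from the peer bookkeeping, the selection could be sketched as follows (illustrative only, not the code in the diff):

```go
func pluralityEpoch(finalized []primitives.Epoch) primitives.Epoch {
	votes := make(map[primitives.Epoch]uint64)
	var winner primitives.Epoch
	for _, e := range finalized {
		votes[e]++
		// Larger group wins; on a tie, the higher epoch wins.
		if votes[e] > votes[winner] || (votes[e] == votes[winner] && e > winner) {
			winner = e
		}
	}
	return winner
}
```
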
@@ -654,9 +654,10 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
FinalizedRoot: mockroot2[:],
|
||||
})
|
||||
|
||||
target, pids := p.BestFinalized(maxPeers, 0)
|
||||
target, pids := p.BestFinalized(0)
|
||||
assert.Equal(t, expectedTarget, target, "Incorrect target epoch retrieved")
|
||||
assert.Equal(t, maxPeers, len(pids), "Incorrect number of peers retrieved")
|
||||
// addPeer called 5 times above
|
||||
assert.Equal(t, 5, len(pids), "Incorrect number of peers retrieved")
|
||||
|
||||
// Expect the returned list to be ordered by finalized epoch and trimmed to max peers.
|
||||
assert.Equal(t, pid3, pids[0], "Incorrect first peer")
|
||||
@@ -1017,7 +1018,10 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
HeadSlot: peerConfig.headSlot,
|
||||
})
|
||||
}
|
||||
epoch, pids := p.BestFinalized(tt.limitPeers, tt.ourFinalizedEpoch)
|
||||
epoch, pids := p.BestFinalized(tt.ourFinalizedEpoch)
|
||||
if len(pids) > tt.limitPeers {
|
||||
pids = pids[:tt.limitPeers]
|
||||
}
|
||||
assert.Equal(t, tt.targetEpoch, epoch, "Unexpected epoch retrieved")
|
||||
assert.Equal(t, tt.targetEpochSupport, len(pids), "Unexpected number of peers supporting retrieved epoch")
|
||||
})
|
||||
@@ -1044,7 +1048,10 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
_, pids := p.BestFinalized(maxPeers, 0)
|
||||
_, pids := p.BestFinalized(0)
|
||||
if len(pids) > maxPeers {
|
||||
pids = pids[:maxPeers]
|
||||
}
|
||||
assert.Equal(t, maxPeers, len(pids), "Wrong number of peers returned")
|
||||
}
|
||||
|
||||
|
||||
@@ -204,6 +204,9 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our light client finality update map.
|
||||
@@ -223,5 +226,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ func (e *endpoint) handlerWithMiddleware() http.HandlerFunc {
|
||||
handler.ServeHTTP(rw, r)
|
||||
|
||||
if rw.statusCode >= 400 {
|
||||
httpErrorCount.WithLabelValues(r.URL.Path, http.StatusText(rw.statusCode), r.Method).Inc()
|
||||
httpErrorCount.WithLabelValues(e.name, http.StatusText(rw.statusCode), r.Method).Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
|
||||
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
}
|
||||
}
|
||||
}
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash block")
|
||||
}
|
||||
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify block signature")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
@@ -993,10 +1001,11 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
|
||||
}
|
||||
|
||||
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
commitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob kzg commitments")
|
||||
|
||||
@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
|
||||
},
|
||||
})
|
||||
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
|
||||
}
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
wantedEpoch := slots.ToEpoch(att.Data.Slot)
|
||||
// Broadcast first using CommitteeId directly (fast path)
|
||||
// This matches gRPC behavior and avoids blocking on state fetching
|
||||
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
|
||||
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get head validator indices")
|
||||
}
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
|
||||
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
|
||||
Index: i,
|
||||
@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
// Save to pool after broadcast (slow path - requires state fetching)
|
||||
// Run in goroutine to avoid blocking the HTTP response
|
||||
go func() {
|
||||
for _, singleAtt := range validAttestations {
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get target state for attestation")
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get committee for attestation")
|
||||
continue
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if len(failedBroadcasts) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
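The restructuring above broadcasts on the request path and defers the state-dependent pool save to a goroutine. As a generic shape of that pattern, with hypothetical helpers (`subnetFor`, `expandWithCommittee`) standing in for the real lookups:

```go
// Fast path: broadcast while the request is still in flight.
for i, att := range validAttestations {
	if err := broadcaster.BroadcastAttestation(ctx, subnetFor(att), att); err != nil {
		recordFailure(i, err)
	}
}

// Slow path: expand to the full attestation and save to the pool off the request path.
go func() {
	for _, att := range validAttestations {
		full, err := expandWithCommittee(context.Background(), att)
		if err != nil {
			log.WithError(err).Error("Could not expand attestation")
			continue
		}
		if err := pool.SaveUnaggregatedAttestation(full); err != nil {
			log.WithError(err).Error("Could not save attestation")
		}
	}
}()
```
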
@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
var req structs.SubmitSyncCommitteeSignaturesRequest
|
||||
err := json.NewDecoder(r.Body).Decode(&req.Data)
|
||||
switch {
|
||||
@@ -711,6 +729,7 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
v, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
AttestationStateFetcher: chainService,
|
||||
}
|
||||
@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
|
||||
})
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleAtt)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttestationsV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListVoluntaryExits(t *testing.T) {
|
||||
@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
|
||||
t.Run("single", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("invalid", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
},
HeadFetcher: chainService,
},
}

@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
})
t.Run("empty", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

var body bytes.Buffer
_, err := body.WriteString("[]")
@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("no body", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleSyncCommitteeMsg)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitSyncCommitteeSignatures(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListBLSToExecutionChanges(t *testing.T) {
@@ -2112,6 +2187,33 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "Invalid attester slashing", e.Message)
})

t.Run("missing-version-header", func(t *testing.T) {
bs, err := util.NewBeaconStateElectra()
require.NoError(t, err)

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}

var body bytes.Buffer
_, err = body.WriteString(invalidAttesterSlashing)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
// Intentionally do not set api.VersionHeader to verify missing header handling.
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttesterSlashingsV2(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
})
}

func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {

@@ -654,6 +654,10 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
return m.BeaconState, nil
}

func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
return m.BeaconState, nil
}

func TestGetSyncCommittees_Future(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)

@@ -27,6 +27,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -3756,6 +3757,7 @@ func Test_validateBlobs(t *testing.T) {
})

t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = fs

@@ -3783,14 +3785,13 @@ func Test_validateBlobs(t *testing.T) {
require.NoError(t, err)

// Generate cell proofs for the blobs (flattened format like execution client)
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
for blobIdx := range blobCount {
_, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
require.NoError(t, err)

for colIdx := range numberOfColumns {
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
cellProofIdx := blobIdx*numberOfColumns + colIdx
cellProofs[cellProofIdx] = proofs[colIdx][:]
}
}

@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

@@ -155,20 +155,19 @@ func TestGetSpec(t *testing.T) {
config.MaxAttesterSlashingsElectra = 88
config.MaxAttestationsElectra = 89
config.MaxWithdrawalRequestsPerPayload = 90
config.MaxCellsInExtendedMatrix = 91
config.UnsetDepositRequestsStartIndex = 92
config.MaxDepositRequestsPerPayload = 93
config.MaxPendingDepositsPerEpoch = 94
config.MaxBlobCommitmentsPerBlock = 95
config.MaxBytesPerTransaction = 96
config.MaxExtraDataBytes = 97
config.BytesPerLogsBloom = 98
config.MaxTransactionsPerPayload = 99
config.FieldElementsPerBlob = 100
config.KzgCommitmentInclusionProofDepth = 101
config.BlobsidecarSubnetCount = 102
config.BlobsidecarSubnetCountElectra = 103
config.SyncMessageDueBPS = 104
config.UnsetDepositRequestsStartIndex = 91
config.MaxDepositRequestsPerPayload = 92
config.MaxPendingDepositsPerEpoch = 93
config.MaxBlobCommitmentsPerBlock = 94
config.MaxBytesPerTransaction = 95
config.MaxExtraDataBytes = 96
config.BytesPerLogsBloom = 97
config.MaxTransactionsPerPayload = 98
config.FieldElementsPerBlob = 99
config.KzgCommitmentInclusionProofDepth = 100
config.BlobsidecarSubnetCount = 101
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103

var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -206,7 +205,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 176, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -500,8 +499,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "1024", v)
case "MAX_REQUEST_BLOCKS_DENEB":
assert.Equal(t, "128", v)
case "NUMBER_OF_COLUMNS":
assert.Equal(t, "128", v)
case "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":
assert.Equal(t, "128000000000", v)
case "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":
@@ -538,14 +535,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "89", v)
case "MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD":
assert.Equal(t, "90", v)
case "MAX_CELLS_IN_EXTENDED_MATRIX":
assert.Equal(t, "91", v)
case "UNSET_DEPOSIT_REQUESTS_START_INDEX":
assert.Equal(t, "92", v)
assert.Equal(t, "91", v)
case "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":
assert.Equal(t, "93", v)
assert.Equal(t, "92", v)
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
assert.Equal(t, "94", v)
assert.Equal(t, "93", v)
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
@@ -563,25 +558,25 @@ func TestGetSpec(t *testing.T) {
case "MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS":
assert.Equal(t, "4096", v)
case "MAX_BLOB_COMMITMENTS_PER_BLOCK":
assert.Equal(t, "95", v)
assert.Equal(t, "94", v)
case "MAX_BYTES_PER_TRANSACTION":
assert.Equal(t, "96", v)
assert.Equal(t, "95", v)
case "MAX_EXTRA_DATA_BYTES":
assert.Equal(t, "97", v)
assert.Equal(t, "96", v)
case "BYTES_PER_LOGS_BLOOM":
assert.Equal(t, "98", v)
assert.Equal(t, "97", v)
case "MAX_TRANSACTIONS_PER_PAYLOAD":
assert.Equal(t, "99", v)
assert.Equal(t, "98", v)
case "FIELD_ELEMENTS_PER_BLOB":
assert.Equal(t, "100", v)
assert.Equal(t, "99", v)
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "101", v)
assert.Equal(t, "100", v)
case "BLOB_SIDECAR_SUBNET_COUNT":
assert.Equal(t, "102", v)
assert.Equal(t, "101", v)
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
assert.Equal(t, "103", v)
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "104", v)
assert.Equal(t, "103", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)

@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -308,7 +309,7 @@ func (s *Server) DataColumnSidecars(w http.ResponseWriter, r *http.Request) {

// parseDataColumnIndices filters out invalid and duplicate data column indices
func parseDataColumnIndices(url *url.URL) ([]int, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
const numberOfColumns = fieldparams.NumberOfColumns
rawIndices := url.Query()["indices"]
indices := make([]int, 0, numberOfColumns)
invalidIndices := make([]string, 0)

@@ -709,15 +709,6 @@ func TestDataColumnSidecars(t *testing.T) {
}

func TestParseDataColumnIndices(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()

// Set NumberOfColumns to 128 for testing
config := params.BeaconConfig().Copy()
config.NumberOfColumns = 128
params.OverrideBeaconConfig(config)

tests := []struct {
name string
queryParams map[string][]string

@@ -116,6 +116,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
for _, update := range updates {
if ctx.Err() != nil {
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
return
}

updateSlot := update.AttestedHeader().Beacon().Slot
@@ -131,12 +132,15 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
if _, err := w.Write(chunkLength); err != nil {
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
return
}
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
return
}
if _, err := w.Write(updateSSZ); err != nil {
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
return
}
}
} else {
@@ -145,6 +149,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
for _, update := range updates {
if ctx.Err() != nil {
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
return
}

updateJson, err := structs.LightClientUpdateFromConsensus(update)

@@ -132,6 +132,7 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return
}
if s.SyncChecker.Synced() && !optimistic {
return

@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
}
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
if err != nil {
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
shared.WriteStateFetchError(w, err)
return nil, false
}
return st, true

@@ -19,7 +19,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
@@ -78,6 +77,7 @@ go_test(
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
Some files were not shown because too many files have changed in this diff