Compare commits

...

291 Commits

Author SHA1 Message Date
satushh
4339822dbe Resolve more conflics 2025-08-15 09:40:12 +05:30
satushh
b40719a19c Merge branch 'peerdas-sync' into peerdas-getBlobsV2-sync 2025-08-15 09:39:56 +05:30
Manu NALEPA
22a6820ede Add extra flags 2025-08-15 01:48:16 +02:00
Manu NALEPA
00ab5a1051 Add DataColumnStorage and SubscribeAllDataSubnets flag. 2025-08-15 00:25:34 +02:00
Manu NALEPA
41cf0ec59e Fix James' comment. 2025-08-14 17:04:03 +02:00
Manu NALEPA
39ca2eaae1 Merge branch 'develop' into peerdas-sync 2025-08-14 14:27:28 +02:00
Manu NALEPA
d5c56355f8 Fix Potuz's comment. 2025-08-14 14:03:08 +02:00
Manu NALEPA
5a3e450067 Fix Potuz's comment. 2025-08-14 14:01:20 +02:00
Manu NALEPA
e50472ab25 Fix typo. 2025-08-14 09:34:31 +02:00
Manu NALEPA
53d69d407b selectPeers: Avoid map with key but empty value. 2025-08-14 09:28:11 +02:00
Manu NALEPA
58687620f6 Fix James' comment. 2025-08-13 22:30:33 +02:00
satushh
f8716d8f77 blockchain: get variable samplesPerSlot only when required 2025-08-13 20:15:44 +05:30
satushh
58795d5ce3 execution: use service context instead of function's for retry 2025-08-13 20:15:18 +05:30
Manu NALEPA
d36875332d Fix Preston's comment. 2025-08-13 16:00:13 +02:00
Manu NALEPA
0a9ff2dc8b Fix James' comment. 2025-08-13 15:04:51 +02:00
Manu NALEPA
cc8a5fc422 Revert "Fix James' comment."
This reverts commit a3f919205a.
2025-08-13 15:03:15 +02:00
Manu NALEPA
9dcb8be2df Fix Potuz's comment. 2025-08-13 15:01:31 +02:00
Manu NALEPA
51e465d690 Revert "Fix Potuz's comment."
This reverts commit c45230b455.
2025-08-13 14:15:27 +02:00
Manu NALEPA
ddbf9cb404 Implement TestFetchDataColumnSidecars. 2025-08-13 12:00:50 +02:00
Manu NALEPA
9ed1496c8f Fix James's comment. 2025-08-13 09:14:25 +02:00
Manu NALEPA
5b04cab118 Fix Preston's comment. 2025-08-13 01:11:22 +02:00
Manu NALEPA
0b681f6861 Update cmd/beacon-chain/flags/config.go
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-08-13 01:00:19 +02:00
Manu NALEPA
88b363e3cf Merge branch 'develop' into peerdas-sync 2025-08-13 00:45:31 +02:00
Manu NALEPA
f521948b4d Fix flakiness in TestSelectPeers. 2025-08-12 23:14:56 +02:00
Manu NALEPA
a3f919205a Fix James' comment. 2025-08-12 22:44:26 +02:00
Manu NALEPA
474f458834 Fix James' comment. 2025-08-12 22:35:48 +02:00
Manu NALEPA
bd17dfefb9 Fix James' commit. 2025-08-12 22:17:23 +02:00
Manu NALEPA
3f361a79a0 Fix James's comment. 2025-08-12 22:10:33 +02:00
Manu NALEPA
09b7fa6bc9 Fix James' comment. 2025-08-12 22:08:33 +02:00
Manu NALEPA
76d974694c Implement TestCategorizeIndices. 2025-08-12 15:47:29 +02:00
Manu NALEPA
dca7f282e6 Implement TestSelectPeers. 2025-08-12 15:11:33 +02:00
Manu NALEPA
327309b5f7 Implement TestUpdateResults. 2025-08-12 13:45:45 +02:00
Manu NALEPA
8c44999d21 Implement TestFetchDataColumnSidecarsFromPeers. 2025-08-12 12:23:20 +02:00
Manu NALEPA
bb2fd617a5 Implement TestSendDataColumnSidecarsRequest. 2025-08-12 11:59:30 +02:00
Manu NALEPA
c31c1e2674 Fix Satyajit's comment. 2025-08-12 10:09:39 +02:00
Manu NALEPA
978ffa4780 Add tests for sendDataColumnSidecarsRequest. 2025-08-11 00:35:40 +02:00
Manu NALEPA
407bf6785f Fix Potuz's comment. 2025-08-10 23:07:56 +02:00
Manu NALEPA
c45230b455 Fix Potuz's comment. 2025-08-10 22:47:00 +02:00
Manu NALEPA
0af6591001 Fix Potuz's comment. 2025-08-10 22:41:00 +02:00
Manu NALEPA
1af249da31 Fix Potuz's comment. 2025-08-10 22:02:22 +02:00
Manu NALEPA
901f6b6e6c Fix Potuz's comment. 2025-08-10 21:59:17 +02:00
Manu NALEPA
a3cdda56d9 Fix Potuz's comment. 2025-08-10 21:53:54 +02:00
Manu NALEPA
cf3200fa06 Fix Potuz's comment. 2025-08-10 21:47:55 +02:00
Manu NALEPA
04cafa1959 Partially fix Potuz's comment. 2025-08-10 21:39:27 +02:00
Manu NALEPA
498b945a61 Fix Satyajit's comment. 2025-08-10 21:35:39 +02:00
Manu NALEPA
90317ba5b5 Fix Potuz's comment. 2025-08-09 01:24:00 +02:00
Manu NALEPA
b35358f440 Fix Potuz's comment. 2025-08-09 00:47:55 +02:00
Manu NALEPA
b926066495 Fix Potuz's comment. 2025-08-08 20:59:28 +02:00
Manu NALEPA
263ddf9a7b PeerDAS: Implement sync 2025-08-08 20:05:12 +02:00
satushh
c558798fe8 execution: edge case - delete activeRetries on success 2025-08-08 16:13:31 +05:30
satushh
ba1699fdee sync: new appropriate mock service 2025-08-08 11:24:32 +05:30
satushh
adf62a6b45 sync: fix lint, test and add extra test for when data is actually not available 2025-08-07 12:47:15 +05:30
satushh
9e5b3fb599 lint: formatting and remove confusing comment 2025-08-07 11:43:11 +05:30
satushh
eaf4b4f9bf blockchain: cleaner DA check 2025-08-07 11:35:50 +05:30
satushh
0b0b7ff0a9 execution: make reconstructSingleflight part of the service struct 2025-08-06 21:29:31 +05:30
satushh
f1be39f7f1 blockchain: move IsDataAvailable interface to blockchain package 2025-08-06 21:11:28 +05:30
satushh
3815ff4c28 sync: don't call ReconstructDataColumnSidecars if not required 2025-08-06 20:47:21 +05:30
satushh
76a0759e13 execution: ensure single responsibility, execution should not do DA check 2025-08-06 18:13:05 +05:30
satushh
5cd2d99606 execution: ensure the retry actually happens when it needs to 2025-08-06 11:34:22 +05:30
satushh
1a2a0688e1 lint: format 2025-08-05 20:59:04 +05:30
satushh
6d0524dcf5 execution: retry logic inside ReconstructDataColumnSidecars itself 2025-08-05 20:45:54 +05:30
satushh
8ec9da81c0 lint: lint and use unused metrics 2025-08-04 18:15:16 +05:30
satushh
facb70e12c lint: formatting 2025-08-04 17:58:33 +05:30
satushh
3d91b35f4e blockchain: fix CustodyGroupCount return 2025-08-04 15:33:10 +05:30
satushh
dc70dae9d0 Merge branch 'peerDAS' into peerDAS-getBlobsV2 2025-08-04 14:43:40 +05:30
satushh
9e2c04400c bazel: bazel run //:gazelle -- fix 2025-08-04 12:58:46 +05:30
satushh
60058266e8 sync: remove unwanted tests 2025-08-04 12:56:46 +05:30
satushh
291c4ac9b5 da: updated IsDataAvailable 2025-08-04 12:00:07 +05:30
satushh
045776ff75 execution: retry atomicity test 2025-08-04 11:26:05 +05:30
satushh
0a386cbdfd execution: fix test 2025-08-04 11:09:36 +05:30
satushh
4f02e44446 sync: remove unwanted checks 2025-08-04 00:53:23 +05:30
satushh
41600b67e3 da: non blocking checks 2025-08-04 00:51:32 +05:30
satushh
cec236ff7d exec: hardcode retry interval 2025-08-03 23:25:17 +05:30
satushh
62dac40734 sync: no goroutine, getblobsv2 in absence of block as well, wrap error 2025-08-03 23:08:16 +05:30
Manu NALEPA
d3763d56cf Clean 2025-08-02 10:02:28 +02:00
Manu NALEPA
461fa50c34 Merge branch 'develop' into peerDAS 2025-08-02 09:25:43 +02:00
satushh
7b059560f6 engine: remove isDataAlreadyAvailable function 2025-08-01 18:47:41 +05:30
satushh
111e5c462f reconstruct: simplify multi goroutine case and avoid race condition 2025-08-01 18:03:01 +05:30
Manu NALEPA
6d4e1d5f7a Merge branch 'develop' into peerDAS 2025-07-31 15:53:56 +02:00
Manu NALEPA
415622ec49 Merge branch 'develop' into peerDAS 2025-07-31 14:42:39 +02:00
Manu NALEPA
df65458834 refactor 2025-07-31 14:42:17 +02:00
Manu NALEPA
2005d5c6f2 step 2: Reconstruct if needed. 2025-07-31 14:42:17 +02:00
Manu NALEPA
7d72fbebe7 step 1: Retrieve from DB. 2025-07-31 14:42:17 +02:00
satushh
43c111bca2 reconstruct: load once, correctly deliver the result to all waiting goroutines 2025-07-31 15:30:14 +05:30
Manu NALEPA
685761666d Merge branch 'develop' into peerDAS 2025-07-28 20:31:48 +02:00
satushh
41c2f1d802 Merge branch 'peerDAS' into peerDAS-getBlobsV2 2025-07-25 17:27:22 +01:00
Manu NALEPA
a75974b5f5 Fix TestCreateLocalNode. 2025-07-25 16:17:28 +02:00
Manu NALEPA
0725dff5e8 Merge branch 'develop' into peerDAS 2025-07-25 13:26:58 +02:00
Manu NALEPA
0d95d3d022 Validator custody: Update earliest available slot. (#15527) 2025-07-25 13:20:54 +02:00
satushh
384270f9a7 beacon: default retry interval 2025-07-24 16:03:11 +01:00
satushh
8e9d3f5f4f lint: remove unused field 2025-07-24 13:29:42 +01:00
satushh
d6d542889c sidecar: recover function and different context for retrying 2025-07-24 11:09:23 +01:00
satushh
f8e6b9d1a8 config: make retry interval configurable 2025-07-24 10:16:07 +01:00
satushh
8f25d1e986 lint: return error when it is not nil 2025-07-23 20:09:46 +01:00
satushh
81e9fda34b lint: fmt and log capitalisation 2025-07-23 10:54:49 +01:00
Manu NALEPA
ede560bee1 Merge branch 'develop' into peerDAS 2025-07-23 11:07:19 +02:00
Manu NALEPA
34a1bf835a Merge branch 'develop' into peerDAS 2025-07-22 17:42:04 +02:00
Manu NALEPA
b0bceac9c0 Implement validator custody with "go up only" according to the latest specification. (#15518)
* Simplify validator custody due to the latest spec.
(Go up only)

* Fix sync.
2025-07-22 17:41:15 +02:00
satushh
0ff2d2fa21 test: engine client and sync package, metrics 2025-07-22 10:40:28 +01:00
satushh
8477a84454 getBlobsV2: retry if reconstruction isnt successful 2025-07-21 19:11:31 +01:00
Manu NALEPA
e95d1c54cf reconstructSaveBroadcastDataColumnSidecars: Ensure a unique reconstruction. 2025-07-18 23:48:11 +02:00
Manu NALEPA
4af3763013 Merge branch 'develop' into peerDAS 2025-07-18 22:39:57 +02:00
Manu NALEPA
a520db7276 Merge branch 'develop' into peerDAS 2025-07-17 10:04:04 +02:00
terence
f8abf0565f Add bundle v2 support for submit blind block (#15198) 2025-07-16 08:19:07 -07:00
Manu NALEPA
11a6af9bf9 /eth/v1/node/identity: Add syncnets and custody_group_count. 2025-07-16 16:26:39 +02:00
Manu NALEPA
6f8a654874 Revert "Fixes server ignores request to gzip data (#14982)"
This reverts commit 4e5bfa9760.
2025-07-16 16:18:11 +02:00
Manu NALEPA
f0c01fdb4b Merge branch 'develop' into peerDAS-do-not-merge 2025-07-16 12:29:52 +02:00
Manu NALEPA
a015ae6a29 Merge branch 'develop' into peerDAS 2025-07-16 09:23:37 +02:00
Manu NALEPA
457aa117f3 Merge branch 'develop' into peerDAS 2025-07-11 09:38:37 +02:00
Manu NALEPA
d302b494df Execution reconstruction: Rename variables and logs. 2025-07-10 14:30:26 +02:00
Manu NALEPA
b3db1b6b74 Flags: Remove unused flag EnablePeerDAS 2025-07-10 13:56:53 +02:00
Manu NALEPA
66e4d5e816 Merge branch 'develop' into peerDAS 2025-07-04 01:34:12 +02:00
Manu NALEPA
41f109aa5b blocker_test.go: Remove unused functions. 2025-07-03 16:00:51 +02:00
Manu NALEPA
cfd4ceb4dd Merge branch 'develop' into peerDAS 2025-07-03 13:20:26 +02:00
Manu NALEPA
df211c3384 Merge branch 'develop' into peerDAS 2025-07-01 13:07:40 +02:00
Manu NALEPA
89e78d7da3 Remove peerSampling.
https://github.com/ethereum/consensus-specs/pull/4393#event-18356965177
2025-06-27 21:37:42 +02:00
Manu NALEPA
e76ea84596 Merge branch 'develop' into peerDAS 2025-06-26 15:03:22 +02:00
Manu NALEPA
f10d6e8e16 Merge branch 'develop' into peerDAS 2025-06-26 15:02:46 +02:00
Manu NALEPA
91eb43b595 Merge branch 'develop' into peerDAS 2025-06-24 23:53:09 +02:00
Manu NALEPA
90710ec57d Advertise correct cgc number starting at Altair. 2025-06-24 17:21:29 +02:00
Manu NALEPA
3dc65f991e Merge branch 'peerdas-send-data-columns-requests' into peerDAS 2025-06-24 10:51:32 +02:00
Manu NALEPA
4d9789401b Implement SendDataColumnSidecarsByRangeRequest and SendDataColumnSidecarsByRootRequest. 2025-06-24 01:06:42 +02:00
Manu NALEPA
f72d59b004 disconnectFromPeerOnError: Add peer agent in logs. 2025-06-23 13:02:13 +02:00
Manu NALEPA
e25497be3e Merge branch 'develop' into peerDAS 2025-06-20 20:04:27 +02:00
Manu NALEPA
8897a26f84 Merge branch 'develop' into peerDAS 2025-06-19 14:57:16 +02:00
Manu NALEPA
b2a26f2b62 earliest_available_slot implementation (networking only). 2025-06-19 13:52:47 +02:00
Manu NALEPA
09659010f8 Merge branch 'develop' into peerDAS 2025-06-19 12:01:45 +02:00
Manu NALEPA
589042df20 CreateTestVerifiedRoDataColumnSidecars: Use consistent block root. 2025-06-12 01:03:56 +02:00
terence tsao
312b93e9b1 Fix reconstruction matrix 2025-06-11 15:04:42 -07:00
Ekaterina Riazantseva
f86f76e447 Add PeerDAS reconstruction metrics (#14807)
* Add reconstruction metrics

* Fix time

* Fix format

* Fix format

* Update cells count function

* fix cells count

* Update reconstruction counter

* Fix peerDAS reconstruction counter metric

* Replace dataColumnSidecars with dataColumnSideCars
2025-06-11 19:03:31 +02:00
terence
c311e652eb Set subscribe all data subnets once (#15388) 2025-06-08 17:23:47 +02:00
Manu NALEPA
6a5d78a331 Merge branch 'develop' into peerDAS 2025-06-06 16:01:29 +02:00
Manu NALEPA
a2fd30497e Merge branch 'develop' into peerDAS 2025-06-06 12:46:48 +02:00
Manu NALEPA
a94561f8dc Merge branch 'develop' into peerDAS 2025-06-06 09:56:04 +02:00
Manu NALEPA
af875b78c9 Peer das misc (#15384)
* `ExchangeCapabilities`: Transform `O(n**2)` into `O(2n)` and fix logging.

* Find peers with subnets and logs: Refactor

* Validator custody: Do not wait being subscribed to advertise correct `cgc`. (temp hack)
2025-06-06 09:43:13 +02:00
Manu NALEPA
61207bd3ac Merge branch 'develop' into peerDAS 2025-06-02 14:15:22 +02:00
Manu NALEPA
0b6fcd7d17 Merge branch 'develop' into peerDAS 2025-05-28 21:05:22 +02:00
Manu NALEPA
fe2766e716 Merge branch 'develop' into peerDAS 2025-05-26 09:57:57 +02:00
Manu NALEPA
9135d765e1 Merge branch 'develop' into peerDAS 2025-05-23 15:41:27 +02:00
Manu NALEPA
eca87f29d1 Merge branch 'develop' into peerDAS 2025-05-22 14:37:11 +02:00
Manu NALEPA
00821c8f55 Merge branch 'develop' into peerDAS 2025-05-21 13:50:23 +02:00
Manu NALEPA
4b9e92bcd7 Peerdas by root req (#15275)
* `DataColumnStorageSummary`: Implement `HasAtLeastOneIndex`.

* `DataColumnStorage.Get`: Exit early if the root is found but no corresponding columns.

* `custodyColumnsFromPeers`: Simplify.

* Remove duplicate `uint64MapToSortedSlice` function.

* `DataColumnStorageSummary`: Add `Stored`.

* Refactor reconstruction related code.
2025-05-16 16:19:01 +02:00
terence
b01d9005b8 Update data column receive log (#15289) 2025-05-16 07:01:40 -07:00
Manu NALEPA
8d812d5f0e Merge branch 'develop' into peerDAS 2025-05-07 17:41:25 +02:00
terence
24a3cb2a8b Add column identifiers by root request (#15212)
* Add column identifiers by root request

* `DataColumnsByRootIdentifiers`: Fix Un/Marshal.

* alternate MashalSSZ impl

* remove sort.Interface impl

* optimize unmarshal and add defensive checks

* fix offsets in error messages

* Fix build, remove sort

* Fix `SendDataColumnSidecarsByRootRequest` and tests.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
Co-authored-by: Kasey <kasey@users.noreply.github.com>
2025-05-06 14:07:16 +02:00
Manu NALEPA
66d1d3e248 Use finalized state for validator custody instead of head state. (#15243)
* `finalizedState` ==> `FinalizedState`.
We'll need it in an other package later.

* `setTargetValidatorsCustodyRequirement`: Use finalized state instead of head state.

* Fix James's comment.
2025-05-05 21:13:49 +02:00
Manu NALEPA
99933678ea Peerdas fix get blobs v2 (#15234)
* `reconstructAndBroadcastDataColumnSidecars`: Improve logging.

* `ReconstructDataColumnSidecars`: Add comments and return early if needed.

* `reconstructAndBroadcastDataColumnSidecars`: Return early if no blobs are retrieved from the EL.

* `filterPeerWhichCustodyAtLeastOneDataColumn`: Remove unneeded log field.

* Fix Terence's comment.
2025-05-02 17:34:32 +02:00
Manu NALEPA
34f8e1e92b Data colummns by range: Use all possible peers then filter them. (#15242) 2025-05-02 12:15:02 +02:00
terence
a6a41a8755 Add column sidecar inclusion proof cache (#15217) 2025-04-29 13:46:32 +02:00
terence
f110b94fac Add flag to subscribe to all blob column subnets (#15197)
* Separate subscribing to data columns from attestation and sync committee subnets

* Fix test

* Rename to subscribe-data-subnets

* Update to subscribe-all-data-subnets

* `--subscribe-all-data-subnets`: Add `.` at the end of help, since it seems to be the consensus.

* `ConfigureGlobalFlags`: Fix log.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-04-29 11:59:17 +02:00
Manu NALEPA
33023aa282 Merge branch 'develop' into peerDAS 2025-04-29 11:13:27 +02:00
Manu NALEPA
eeb3cdc99e Merge branch 'develop' into peerDAS 2025-04-18 08:37:33 +02:00
Preston Van Loon
1e7147f060 Remove --compilation_mode=opt, use supranational blst headers. 2025-04-17 20:53:54 +02:00
Manu NALEPA
8936beaff3 Merge branch 'develop' into peerDAS 2025-04-17 16:49:22 +02:00
Manu NALEPA
c00283f247 UpgradeToFulu: Add spec tests. (#15189) 2025-04-17 15:17:27 +02:00
Manu NALEPA
a4269cf308 Add tests (#15188) 2025-04-17 13:12:46 +02:00
Manu NALEPA
91f3c8a4d0 c-kzg-4844 lib: Update to v2.1.1. (#15185) 2025-04-17 01:25:36 +02:00
terence
30c7ee9c7b Validate parent block exists before signature (#15184)
* Validate parent block exists before signature

* `ValidProposerSignature`: Add comment

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-04-17 00:40:48 +02:00
Manu NALEPA
456d8b9eb9 Merge branch 'develop' into peerDAS-do-not-merge 2025-04-16 22:58:38 +02:00
Manu NALEPA
4fe3e6d31a Merge branch 'develop' into peerDAS-do-not-merge 2025-04-16 20:30:19 +02:00
Manu NALEPA
01ee1c80b4 merge from develop 2025-04-16 17:27:48 +02:00
Manu NALEPA
c14fe47a81 Data columns by range requests: Simplify and move from initial sync package to sync package. (#15179)
* `data_column.go`: Factorize declarations (no functional changes).

* Verification for data columns: Do not recompute again if already done.

* `SaveDataColumns`: Delete because unused.

* `MissingDataColumns`: Use `DataColumnStorageSummarizer` instead of `DataColumnStorage`

* `TestFetchDataColumnsFromPeers`: Move trusted setup load out of the loop for optimization.

* `TestFetchDataColumnsFromPeers`: Use fulu block instead of deneb block.

* `fetchDataColumnsFromPeers`: Use functions already implemented in the `sync` package instead of duplicated them here.

* `fetchDataColumnsFromPeers` ==> `fetchMissingDataColumnsFromPeers`.

* Data columns initial sync: simplify

* Requests data columns by range: Move from initial sync to sync package.

Since it will eventually be used by the backfill package, and
the backfill packages does not depend on the initial sync package.
2025-04-16 11:18:05 +02:00
terence
b9deabbf0a Execution API: Support blobs_bundle_v2 for PeerDAS (#15167)
* Execution api: add and use blobs_bundle_v2

* Execution bundle fulu can unmarshal

* Manus feedback and fix execution request decode
2025-04-16 10:53:55 +02:00
Manu NALEPA
5d66a98e78 Uniformize data columns sidecars validation pipeline (#15154)
* Rework the data column sidecars verification pipeline.

* Nishant's comment.

* `blocks.BlockWithROBlobs` ==> `blocks.BlockWithROSidecars`

* `batchBlobSync` ==> `batchSidecarSync`.

* `handleBlobs` ==> `handleSidecars`.

* Kasey comment about verification
2025-04-15 20:32:50 +02:00
Manu NALEPA
2d46d6ffae Various small optimizations (#15153)
* Reconstruct data columns from gossip source: Call `setSeenDataColumnIndex`.

* `reconstructAndBroadcastDataColumnSidecars`: Minor optimisation.

Avoid to range over all columns.

* Reconstructed data columns sidecars from EL: Avoid broadcasting already received data columns.
2025-04-09 11:38:28 +02:00
Manu NALEPA
57107e50a7 Cells proofs (#15152)
* Implement distributed block building.
Credits: Francis

* Add fixes.
2025-04-09 09:28:59 +02:00
Manu NALEPA
47271254f6 New Data Column Sidecar Storage Design, Data Columns as a First-Class Citizen & Unit Testing (#15061)
* DB Filesystem: Move all data column related code to `data_columns.go`

Only code move.

* Implement data columns storage

* Kasey comment: Fix typo

* Kasey comment: Fix clutter

* Kasey comment: `IsDataAvailable`: Remove `nodeID`.

* Kasey comment: indice ==> index

* Kasey comment: Move `CreateTestVerifiedRoDataColumnSidecars` in `beacon-chain/verification/fake`.

* `Store` ==> `Save`.

* Kasey comment: AAAA!

* Kasey comment: Fix typo.

* Kasey comment: Add comment.

* Kasey comment: Stop exporting errors for nothing.

* Kasey comment: Read all metadata at once.

* Kasey comment: Compute file size instead of reading it from stats.

* Kasey comment: Lock mutexes before checking if the file exists.

* Kasey comment: `limit` ==> `nonZeroOffset`.

* Kasey comment: `DataColumnStorage.Get`: Set verified into the `verification package`.

* Kasey comment: `prune` - Flatten the `==` case.

* Kasey comment: Implement and use `storageIndices`.

* `DataColumnsAlignWithBlock`: Move into its own file.

* `DataColumnSidecar`: Rename variables to stick with
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar

* Kasey comment: Add `file.Sync`.

* `DataColumnStorage.Get`: Remove useless cast.

* (Internal) Kasey comment: Set automatically the count of saved data columns.
2025-04-08 23:20:38 +02:00
Francis Li
f304028874 Add new vars defined in consensus-spec (#15101) 2025-03-31 20:01:47 +02:00
Manu NALEPA
8abc5e159a DataColumnSidecarsForReconstruct: Add guards (#15051) 2025-03-14 10:29:15 +01:00
Manu NALEPA
b1ac53c4dd Set defaultEngineTimeout = 2 * time.Second (#15043) 2025-03-13 13:56:42 +01:00
Francis Li
27ab68c856 feat: implement reconstruct and broadcast data columns (#15023)
* Implement reconstructAndBroadcastDataColumns

* Fix merge error

* Fix tests

* Minor changes.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-03-13 11:19:34 +01:00
Niran Babalola
ddf5a3953b Fetch data columns from multiple peers instead of just supernodes (#14977)
* Extract the block fetcher's peer selection logic for data columns so it can be used in both by range and by root requests

* Refactor data column sidecar request to send requests to multiple peers instead of supernodes

* Remove comment

* Remove unused method

* Add tests for AdmissiblePeersForDataColumns

* Extract data column fetching into standalone functions

* Remove AdmissibleCustodyGroupsPeers and replace the final call with requests to multiple peers

* Apply suggestions from code review

Co-authored-by: Manu NALEPA <nalepae@gmail.com>

* Wrap errors

* Use cached peerdas.Info and properly convert custody groups to custody columns

* Rename filterPeersForRangeReq

* Preserve debugging descriptions when filtering out peers

* Remove unused functions.

* Initialize nested maps

* Fix comment

* First pass at retry logic for data column requests

* Select fresh peers for each retry

* Return an error if there are requested columns remaining

* Adjust errors

* Improve slightly the godoc.

* Improve wrapped error messages.

* `AdmissiblePeersForDataColumns`: Use value or `range`.

* Remove `convertCustodyGroupsToDataColumnsByPeer` since used only once.

* Minor fixes.

* Retry until we run out of peers

* Delete from the map of peers instead of filtering

* Remove unneeded break

* WIP: TestRequestDataColumnSidecars

* `RequestDataColumnSidecars`: Move the happy path in the for loop.

* Convert the peer ID to a node ID instead of using peer.EnodeID

* Extract AdmissiblePeersForDataColumns from a method into a function and use it (instead of a mock) in TestRequestDataColumnSidecars

* Track data column requests in tests to compare vs expectations

* Run gazelle

* Clean up test config changes so other tests don't break

* Clean up comments

* Minor changes.

* Add tests for peers that don't respond with all requested columns

* Respect MaxRequestDataColumnSidecars

---------

Co-authored-by: Manu NALEPA <nalepae@gmail.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-03-12 11:46:20 +01:00
Manu NALEPA
92d2fc101d Implement validator custody (#14948)
* Node info: Rename cache and mutex.

* Add `VALIDATOR_CUSTODY_REQUIREMENT` and `BALANCE_PER_ADDITIONAL_CUSTODY_GROUP`.

* Implement `ValidatorsCustodyRequirement`.

* Sync service: Add tracked validators cache.

* `dataColumnSidecarByRootRPCHandler`: Remove custody columns in logs.

* `dataColumnSidecarByRangeRPCHandler`: Remove custody columns in logs.

* `blobsFromStoredDataColumns`: Simplify.

No longer make a difference between "can theoretically reconstruct" and "can actually reconstruct".

* Implement validator custody.

* Fix Nishant's comment.

* Fix Nishant's commit.
2025-03-11 11:11:23 +01:00
Francis Li
8996000d2b feature: Implement data column support for different storage layouts (#15014)
* Implement data column support for different storage layouts

* Fix errors

* Fix linting

* `slotFromFile`: First try to decode as a data column.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-03-07 20:25:31 +01:00
Francis Li
a2fcba2349 feat: implement reconstruct data column sidecars (#15005) 2025-03-05 17:23:58 +01:00
Francis Li
abe8638991 feat: update ckzg lib to support ComputeCells (#15004)
* Update ckzg version to include ComputeCells

* Minor fix

* Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true`

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-03-04 17:48:18 +01:00
Francis Li
0b5064b474 feat: cell proof computation related proto and generated go files (#15003)
* Add new message type to proto and generate .go files

* `proto/engine/v1`: Remove `execution_engine_eip7594.go` since this file does not exists.

Rerun ` hack/update-go-pbs.sh` and `hack/update-go-ssz.sh `.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-03-04 17:48:01 +01:00
Manu NALEPA
da9d4cf5b9 Merge branch 'develop' into peerDAS 2025-02-21 16:03:20 +01:00
Manu NALEPA
a62cca15dd Merge branch 'develop' into peerDAS 2025-02-20 15:48:07 +01:00
Manu NALEPA
ac04246a2a Avoid computing peerDAS info again and again. (#14893)
* `areDataColumnsAvailable`: `signed` ==> `signedBlock`.

* peerdas: Split `helpers.go` in multiple files respecting the specification.

* peerDAS: Implement `Info`.

* peerDAS: Use cached `Info` when possible.
2025-02-14 18:06:04 +01:00
Manu NALEPA
0923145bd7 Merge branch 'develop' into peerDAS 2025-02-14 16:51:05 +01:00
Manu NALEPA
a216cb4105 Merge branch 'develop' into peerDAS 2025-02-13 18:22:21 +01:00
Manu NALEPA
01705d1f3d Peer das sync empty requests (#14854)
* `TestBuildBwbSlices`: Add test case failing with the current implementation.

* Fix `buildBwbSlices` to comply with the new test case.

* `block_fetchers.go`: Improve logging and godoc.

* `DataColumnsRPCMinValidSlot`: Update to Fulu.
2025-02-03 15:23:04 +01:00
Manu NALEPA
14f93b4e9d Sync: Integrate batch directly in buildBwbSlices. (#14843)
Previously, the bwb slices were built first in `buildBwbSlices`, and only then were over-sized requests batched in `buildDataColumnSidecarsByRangeRequests`.

In some edge cases, this led to requesting data columns from peers for blocks with no blobs.

Splitting into batches directly in `buildBwbSlices` fixes the issue.
2025-01-30 12:11:06 +01:00
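The batching change described above can be illustrated with a minimal Go sketch. Everything here is hypothetical (the `blockInfo` type, its fields, and the `batchWithBlobs` helper are illustrative, not taken from the Prysm codebase): batches are built directly while walking the block list, blocks with no blobs are skipped, and each batch is capped, so no data column request is ever issued for a blob-less block.

```go
package main

import "fmt"

// blockInfo is a hypothetical stand-in holding just what the batching
// decision needs: the block slot and its blob commitment count.
type blockInfo struct {
	Slot      uint64
	BlobCount int
}

// batchWithBlobs sketches the idea from the commit above: build request
// batches directly from the block list, skipping blocks with no blobs and
// capping each batch, instead of batching oversized requests afterwards.
func batchWithBlobs(blocks []blockInfo, maxPerBatch int) [][]blockInfo {
	var batches [][]blockInfo
	var current []blockInfo
	for _, b := range blocks {
		if b.BlobCount == 0 {
			continue // nothing to request for this block
		}
		current = append(current, b)
		if len(current) == maxPerBatch {
			batches = append(batches, current)
			current = nil
		}
	}
	if len(current) > 0 {
		batches = append(batches, current)
	}
	return batches
}

func main() {
	blocks := []blockInfo{{1, 2}, {2, 0}, {3, 1}, {4, 3}, {5, 0}, {6, 1}}
	fmt.Println(batchWithBlobs(blocks, 2)) // [[{1 2} {3 1}] [{4 3} {6 1}]]
}
```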
Manu NALEPA
ad11036c36 reconstructAndBroadcastBlobs: Temporarily deactivate starting at Fulu. 2025-01-27 15:15:34 +01:00
Manu NALEPA
632a06076b Merge branch 'develop' into peerDAS 2025-01-22 21:30:32 +01:00
Manu NALEPA
242c2b0268 Merge branch 'develop' into peerDAS 2025-01-22 20:08:10 +01:00
Ekaterina Riazantseva
19662da905 Add PeerDAS kzg and inclusion proof verification metrics (#14814) 2025-01-21 16:20:10 +01:00
Ekaterina Riazantseva
7faee5af35 Add PeerDAS gossip verification metrics (#14796) 2025-01-21 16:16:12 +01:00
Ekaterina Riazantseva
805ee1bf31 Add 'beacon' prefix to 'data_column_sidecar_computation' metric (#14790) 2025-01-21 16:14:26 +01:00
Manu NALEPA
bea46fdfa1 Merge branch 'develop' into peerDAS 2025-01-20 13:37:29 +01:00
Manu NALEPA
f6b1fb1c88 Merge branch 'develop' into peerDAS 2025-01-16 10:23:21 +01:00
Manu NALEPA
6fb349ea76 unmarshalState: Use hasFuluKey. 2025-01-15 20:48:25 +01:00
Manu NALEPA
e5a425f5c7 Merge branch 'develop' into peerDAS 2025-01-15 17:18:34 +01:00
Manu NALEPA
f157d37e4c peerDAS: Decouple network subnets from das-core. (#14784)
https://github.com/ethereum/consensus-specs/pull/3832/
2025-01-14 10:45:05 +01:00
Manu NALEPA
5f08559bef Merge branch 'develop' into peerDAS 2025-01-08 10:18:18 +01:00
Manu NALEPA
a082d2aecd Merge branch 'fulu-boilerplate' into peerDAS 2025-01-06 13:45:33 +01:00
Manu NALEPA
bcfaff8504 Upgraded state to <fork> log: Move from debug to info.
Rationale:
This log is the only one notifying the user a new fork happened.
A new fork is always a little bit stressful for a node operator.
Having at least one log indicating the client switched fork is something useful.
2025-01-05 16:22:43 +01:00
Manu NALEPA
d8e09c346f Implement the Fulu fork boilerplate. 2025-01-05 16:22:38 +01:00
Manu NALEPA
876519731b Prepare for future fork boilerplate. 2025-01-05 16:14:02 +01:00
Manu NALEPA
de05b83aca Merge branch 'develop' into peerDAS 2024-12-30 15:11:02 +01:00
Manu NALEPA
56c73e7193 Merge branch 'develop' into peerDAS 2024-12-27 22:11:36 +01:00
Manu NALEPA
859ac008a8 Activate peerDAS at electra. (#14734) 2024-12-27 09:48:57 +01:00
Manu NALEPA
f882bd27c8 Merge branch 'develop' into peerDAS 2024-12-18 16:15:32 +01:00
Manu NALEPA
361e5759c1 Merge branch 'develop' into peerDAS 2024-12-17 22:19:20 +01:00
Manu NALEPA
34ef0da896 Merge branch 'develop' into peerDAS 2024-12-10 23:11:45 +01:00
Manu NALEPA
726e8b962f Revert "Revert "Add error count prom metric (#14670)""
This reverts commit 5f17317c1c.
2024-12-10 21:49:40 +01:00
Manu NALEPA
453ea01deb disconnectFromPeer: Remove unused function. 2024-11-28 17:37:30 +01:00
Manu NALEPA
6537f8011e Merge branch 'peerDAS' into peerDAS-do-not-merge 2024-11-28 17:27:44 +01:00
Manu NALEPA
5f17317c1c Revert "Add error count prom metric (#14670)"
This reverts commit b28b1ed6ce.
2024-11-28 16:37:19 +01:00
Manu NALEPA
3432ffa4a3 PeerDAS: Batch columns verifications (#14559)
* `ColumnAlignsWithBlock`: Split lines.

* Data columns verifications: Batch

* Remove completely `DataColumnBatchVerifier`.

Only `DataColumnsVerifier` (with `s`) on columns remains.
It is the responsibility of the function which receives the data column
(either via gossip, a by-range request, or a by-root request) to verify the
data column with respect to the corresponding checks.

* Fix Nishant's comment.
2024-11-27 10:37:03 +01:00
Manu NALEPA
9dac67635b streamDataColumnBatch: Sort columns by index. (#14542)
https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyrange-v1

The following data column sidecars, where they exist, MUST be sent in (slot, column_index) order.
2024-11-27 10:37:03 +01:00
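The (slot, column_index) ordering requirement quoted above boils down to a two-key sort. A minimal Go sketch, assuming a hypothetical `sidecarRef` type (illustrative only, not the actual Prysm sidecar type):

```go
package main

import (
	"fmt"
	"sort"
)

// sidecarRef is a hypothetical, minimal stand-in for a data column sidecar:
// only the two fields needed for the (slot, column_index) ordering rule.
type sidecarRef struct {
	Slot        uint64
	ColumnIndex uint64
}

// sortSidecars orders sidecars by (slot, column_index), the order the
// DataColumnSidecarsByRange spec text quoted above requires for responses.
func sortSidecars(scs []sidecarRef) {
	sort.Slice(scs, func(i, j int) bool {
		if scs[i].Slot != scs[j].Slot {
			return scs[i].Slot < scs[j].Slot
		}
		return scs[i].ColumnIndex < scs[j].ColumnIndex
	})
}

func main() {
	scs := []sidecarRef{{Slot: 11, ColumnIndex: 64}, {Slot: 10, ColumnIndex: 127}, {Slot: 10, ColumnIndex: 3}}
	sortSidecars(scs)
	fmt.Println(scs) // [{10 3} {10 127} {11 64}]
}
```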
Manu NALEPA
9be69fbd07 PeerDAS: Fix major bug in dataColumnSidecarsByRangeRPCHandler and allow syncing from full nodes. (#14532)
* `validateDataColumnsByRange`: `current` ==> `currentSlot`.

* `validateRequest`: Extract `remotePeer` variable.

* `dataColumnSidecarsByRangeRPCHandler`: Small non functional refactor.

* `streamDataColumnBatch`: Fix major bug.

Before this commit, the node was unable to respond with a data column index higher than the count of stored data columns.
For example, if 8 data columns are stored for a given block, the node was
able to respond for data column indices 1, 3, and 5, but not for 10, 16, or 127.

The issue was visible only for full nodes, since super nodes always store 128 data columns.

* Initial sync: Fetch data columns from all peers.
(Not only from supernodes.)

* Nishant's comment: Fix `lastSlot` and `endSlot` duplication.

* Address Nishant's comment.
2024-11-27 10:37:03 +01:00
Manu NALEPA
e21261e893 Data columns initial sync: Rework. (#14522) 2024-11-27 10:37:03 +01:00
Nishant Das
da53a8fc48 Fix Commitments Check (#14493)
* Fix Commitments Check

* `highestFinalizedEpoch`: Refactor (no functional change).

* `retrieveMissingDataColumnsFromPeers`: Fix logs.

* `VerifyDataColumnSidecarKZGProofs`: Optimise with capacity.

* Save data columns when initial syncing.

* `dataColumnSidecarsByRangeRPCHandler`: Add logs when a request enters.

* Improve logging.

* Improve logging.

* `peersWithDataColumns: Do not filter any more on peer head slot.

* Fix Nishant's comment.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:37:03 +01:00
Manu NALEPA
a14634e656 PeerDAS: Improve initial sync logs (#14496)
* `retrieveMissingDataColumnsFromPeers`: Search only for needed peers.

* Improve logging.
2024-11-27 10:37:03 +01:00
Manu NALEPA
43761a8066 PeerDAS: Fix initial sync with super nodes (#14495)
* Improve logging.

* `retrieveMissingDataColumnsFromPeers`: Limit to `512` items per request.

* `retrieveMissingDataColumnsFromPeers`: Allow `nil` peers.

Before this commit:
If, when this function is called, we are not yet connected to enough peers, then `peers` may not be satisfactory,
and, if new peers connect later, we will never see them.

After this commit:
If `peers` is `nil`, then we regularly check all connected peers.
If `peers` is not `nil`, then we use them.
2024-11-27 10:37:03 +01:00
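A minimal Go sketch of the nil-peers fallback described above, assuming a hypothetical `resolvePeers` helper (illustrative only, not the actual Prysm code): an explicit peer list is used as given, while a `nil` list causes the currently connected peers to be re-read on every attempt, so peers that connect later are still considered.

```go
package main

import "fmt"

// resolvePeers returns the peers to use for the current attempt.
// If the caller passed an explicit list, it is used as-is; if it passed nil,
// the currently connected peers are re-read so that peers which connected
// after the retrieval started are picked up on later attempts.
func resolvePeers(provided []string, connected func() []string) []string {
	if provided != nil {
		return provided
	}
	return connected()
}

func main() {
	connected := func() []string { return []string{"peerA", "peerB"} }

	// Explicit peers: used as-is.
	fmt.Println(resolvePeers([]string{"peerC"}, connected)) // [peerC]

	// nil peers: re-evaluated on each retry iteration.
	for attempt := 0; attempt < 2; attempt++ {
		fmt.Println(resolvePeers(nil, connected)) // [peerA peerB]
	}
}
```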
Manu NALEPA
01dbc337c0 PeerDAS: Fix initial sync (#14494)
* `BestFinalized`: Refactor (no functional change).

* `BestNonFinalized`: Refactor (no functional change).

* `beaconBlocksByRangeRPCHandler`: Remove useless log.

The same is already printed at the start of the function.

* `calculateHeadAndTargetEpochs`: Avoid `else`.

* `ConvertPeerIDToNodeID`: Improve error.

* Stop printing noisy "peer should be banned" logs.

* Initial sync: Request data columns from peers which:
- custody a superset of columns we need, and
- have a head slot >= our target slot.

* `requestDataColumnsFromPeers`: Shuffle peers before requesting.

Before this commit, we always requested peers in the same order,
until one of them responded.
Without shuffling, we always requested data columns from the same
peer.

* `requestDataColumnsFromPeers`: If error from a peer, just log the error and skip the peer.

* Improve logging.

* Fix tests.
2024-11-27 10:37:03 +01:00
Nishant Das
92f9b55fcb Put Subscriber in Goroutine (#14486) 2024-11-27 10:36:18 +01:00
Manu NALEPA
f65f12f58b Stop disconnecting peers for bad response / excessive colocation. (#14483) 2024-11-27 10:36:17 +01:00
Manu NALEPA
f2b61a3dcf PeerDAS: Misc improvements (#14482)
* `retrieveMissingDataColumnsFromPeers`: Improve logging.

* `dataColumnSidecarByRootRPCHandler`: Stop decreasing peer's score if asking for a column we do not custody.

* `dataColumnSidecarByRootRPCHandler`: If a data column is unavailable, stop waiting for it.

This behaviour was useful for peer sampling.
Now, just return the data column if we store it.
If we don't, skip.

* Dirty code comment.

* `retrieveMissingDataColumnsFromPeers`: Improve logs.

* `SendDataColumnsByRangeRequest`: Improve logs.

* `dataColumnSidecarsByRangeRPCHandler`: Improve logs.
2024-11-27 10:34:38 +01:00
Manu NALEPA
77a6d29a2e PeerDAS: Re-enable full node joining the main fork (#14475)
* `columnErrBuilder`: Uses `Wrap` instead of `Join`.

Reason: `Join` inserts a line break, which makes the log quite unreadable.

* `validateDataColumn`: Improve log.

* `areDataColumnsAvailable`: Improve log.

* `SendDataColumnSidecarByRoot` ==> `SendDataColumnSidecarsByRootRequest`.

* `handleDA`: Refactor error message.

* `sendRecentBeaconBlocksRequest` ==> `sendBeaconBlocksRequest`.

Reason: There is no notion at all of "recent" in the function.

If the caller decides to call this function only with "recent" blocks, that's fine.
However, the function itself will know nothing about the "recentness" of these blocks.

* `sendBatchRootRequest`: Improve comments.

* `sendBeaconBlocksRequest`: Avoid `else` usage and use map of bool instead of `struct{}`.

* `wrapAndReportValidation`: Remove `agent` from log.

Reason: This prevents the log from fitting on one line, and it is not really useful for debugging.

* `validateAggregateAndProof`: Add comments.

* `GetValidCustodyPeers`: Fix typo.

* `GetValidCustodyPeers` ==> `DataColumnsAdmissibleCustodyPeers`.

* `CustodyHandler` ==> `DataColumnsHandler`.

* `CustodyCountFromRemotePeer` ==> `DataColumnsCustodyCountFromRemotePeer`.

* Implement `DataColumnsAdmissibleSubnetSamplingPeers`.

* Use `SubnetSamplingSize` instead of `CustodySubnetCount` where needed.

* Revert "`wrapAndReportValidation`: Remove `agent` from log."

This reverts commit 55db351102.
2024-11-27 10:34:38 +01:00
Manu NALEPA
31d16da3a0 PeerDAS: Multiple improvements (#14467)
* `scheduleReconstructedDataColumnsBroadcast`: Really minor refactor.

* `receivedDataColumnsFromRootLock` -> `dataColumnsFromRootLock`

* `reconstructDataColumns`: Stop looking into the DB to know if we have some columns.

Before this commit:
Each time we receive a column, we look into the filesystem for all columns we store.
==> For 128 columns, that amounts to 1 + 2 + 3 + ... + 128 = 128(128+1)/2 = 8256 file lookups.

Also, as soon as a column is saved into the file system, if we look at the filesystem again right after, we assume the column will be available (strict consistency).
This turns out not to always be true.

==> Sometimes, we can reconstruct and reseed columns more than once, because of this lack of filesystem strict consistency.

After this commit:
We use a (strictly consistent) cache to determine whether we received a column or not (a minimal sketch of such a cache follows this entry).
==> No more consistency issues, and less stress on the filesystem.

* `dataColumnSidecarByRootRPCHandler`: Improve logging.

Before this commit, logged values assumed that all requested columns correspond to
the same block root, which is not always the case.

After this commit, we know which columns are requested for which root.

* Add a log when broadcasting a data column.

This is useful to debug "lost data columns" in devnet.

* Address Nishant's comment
2024-11-27 10:34:38 +01:00
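A minimal Go sketch of such a strictly consistent cache, assuming a hypothetical `seenColumnCache` type (illustrative only, not the actual Prysm implementation): received column indices are tracked per block root in memory under a mutex, which avoids both the repeated filesystem lookups and the read-after-write consistency assumption described above.

```go
package main

import (
	"fmt"
	"sync"
)

// seenColumnCache records which data column indices have been received for a
// given block root, so the node does not have to query the filesystem
// (up to 1 + 2 + ... + 128 = 8256 lookups per block) and does not depend on
// filesystem read-after-write consistency.
type seenColumnCache struct {
	mu   sync.Mutex
	seen map[[32]byte]map[uint64]bool // block root -> column index -> received
}

func newSeenColumnCache() *seenColumnCache {
	return &seenColumnCache{seen: make(map[[32]byte]map[uint64]bool)}
}

// markSeen records that the column index was received for the root and
// returns how many distinct columns are now known for that root.
func (c *seenColumnCache) markSeen(root [32]byte, index uint64) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	cols, ok := c.seen[root]
	if !ok {
		cols = make(map[uint64]bool)
		c.seen[root] = cols
	}
	cols[index] = true
	return len(cols)
}

func main() {
	cache := newSeenColumnCache()
	var root [32]byte
	fmt.Println(cache.markSeen(root, 3))  // 1
	fmt.Println(cache.markSeen(root, 64)) // 2
	fmt.Println(cache.markSeen(root, 64)) // still 2: duplicates are idempotent
}
```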
Justin Traglia
19221b77bd Update c-kzg-4844 to v2.0.1 (#14421) 2024-11-27 10:34:38 +01:00
Manu NALEPA
83df293647 Peerdas: Several updates (#14459)
* `validateDataColumn`: Refactor logging.

* `dataColumnSidecarByRootRPCHandler`: Improve logging.

* `isDataAvailable`: Improve logging.

* Add hidden debug flag: `--data-columns-reject-slot-multiple`.

* Add more logs about peer disconnection.

* `validPeersExist` --> `enoughPeersAreConnected`

* `beaconBlocksByRangeRPCHandler`: Add remote Peer ID in logs.

* Stop calling twice `writeErrorResponseToStream` in case of rate limit.
2024-11-27 10:34:37 +01:00
Manu NALEPA
c20c09ce36 Peerdas: Full subnet sampling and sendBatchRootRequest fix. (#14452)
* `sendBatchRootRequest`: Refactor and add comments.

* `sendBatchRootRequest`: Do send requests to peers that custody a superset of our columns.

Before this commit, we sent "data columns by root requests" for data columns peers do not custody.

* Data columns: Use subnet sampling only.

(Instead of peer sampling.)

* `areDataColumnsAvailable`: Improve logs.

* `GetBeaconBlock`: Improve logs.

Rationale: A `begin` log should always be followed by a `success` log or a `failure` log.
2024-11-27 10:30:29 +01:00
Manu NALEPA
2191faaa3f Fix CPU usage in small devnets (#14446)
* `CustodyCountFromRemotePeer`: Set happy path in the outer scope.

* `FindPeersWithSubnet`: Improve logging.

* `listenForNewNodes`: Avoid infinite loop in a small subnet.

* Address Nishant's comment.

* FIx Nishant's comment.
2024-11-27 10:30:29 +01:00
Nishant Das
2de1e6f3e4 Revert "Change Custody Count to Uint8 (#14386)" (#14415)
This reverts commit bd7ec3fa97.
2024-11-27 10:30:29 +01:00
Manu NALEPA
db44df3964 Fix Initial Sync with 128 data columns subnets (#14403)
* `pingPeers`: Add log with new ENR when modified.

* `p2p Start`: Use idiomatic go error syntax.

* P2P `start`: Fix error message.

* Use no bootnodes at all if the `--chain-config-file` flag is used and no `--bootstrap-node` flag is used.

Before this commit, if the `--chain-config-file` flag was used and no `--bootstrap-node` flag was used, then the bootnodes were (incorrectly) defaulted to the `mainnet` ones.

* `validPeersExist`: Centralize logs.

* `AddConnectionHandler`: Improve logging.

"Peer connected" does not really reflect the fact that a new peer is actually connected. --> "New peer connection" is more clear.

Also, instead of writing `0`, `1`or `2` for direction, now it's writted "Unknown", "Inbound", "Outbound".

* Logging: Add 2 decimals for timestamt in text and JSON logs.

* Improve "no valid peers" logging.

* Improve "Some columns have no peers responsible for custody" logging.

* `pubsubSubscriptionRequestLimit`: Increase to be consistent with data columns.

* `sendPingRequest`: Improve logging.

* `FindPeersWithSubnet`: Regularly recheck whether our current set of peers already contains enough peers for this topic.

Before this commit, new peers HAD to be found, even if the current peers were already acceptable.
For a very small network, this used to lead to an infinite search.

* `subscribeDynamicWithSyncSubnets`: Use exactly the same subscription function initially and every slot.

* Make deepsource happier.

* Nishant's commend: Change peer disconnected log.

* NIshant's comment: Change `Too many incoming subscription` log from error to debug.

* `FindPeersWithSubnet`: Address Nishant's comment.

* `batchSize`: Address Nishant's comment.

* `pingPeers` ==> `pingPeersAndLogEnr`.

* Update beacon-chain/sync/subscriber.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-11-27 10:30:29 +01:00
Nishant Das
f92eb44c89 Add Data Column Computation Metrics (#14400)
* Add Data Column Metrics

* Shift it All To Peerdas Package
2024-11-27 10:24:03 +01:00
Nishant Das
a26980b64d Set Precompute at 8 (#14399) 2024-11-27 10:24:03 +01:00
Manu NALEPA
f58cf7e626 PeerDAS: Improve logging and reduce the number of needed goroutines for reconstruction (#14397)
* `broadcastAndReceiveDataColumns`: Use real `sidecar.ColumnIndex` instead of position in the slice.

And improve logging as well.

* `isDataColumnsAvailable`: Improve logging.

* `validateDataColumn`: Print `Accepted data column sidecar gossip` really at the end.

* Subscriber: Improve logging.

* `sendAndSaveDataColumnSidecars`: Use a commonly used function for logging.

* `dataColumnSidecarByRootRPCHandler`: Logging - Print `all` instead of all the columns for a super node.

* Verification: Improve logging.

* `DataColumnsWithholdCount`: Set as `uint64` instead of `int`.

* `DataColumnFields`: Improve logging.

* Logging: Remove now useless private `columnFields`function.

* Avoid useless goroutines blocking for reconstruction.

* Update beacon-chain/sync/subscriber.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Address Nishant's comment.

* Improve logging.

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-11-27 10:24:03 +01:00
Nishant Das
68da7dabe2 Fix Bugs in PeerDAS Testing (#14396)
* Fix Various Bugs in PeerDAS

* Remove Log

* Remove useless copy var.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:24:03 +01:00
Nishant Das
d1e43a2c02 Change Custody Count to Uint8 (#14386)
* Add Changes for Uint8 Csc

* Fix Build

* Fix Build for Sync

* Fix Discovery Test
2024-11-27 10:24:03 +01:00
Nishant Das
3652bec2f8 Use Data Column Validation Across Prysm (#14377)
* Use Data Column Validation Everywhere

* Fix Build

* Fix Lint

* Fix Clock Synchronizer

* Fix Panic
2024-11-27 10:24:03 +01:00
Nishant Das
81b7a1725f Update Config To Latest Value (#14352)
* Update values

* Update Spec To v1.5.0-alpha.5

* Fix Discovery Tests

* Hardcode Subnet Count For Tests

* Fix All Initial Sync Tests

* Gazelle

* Less Chaotic Service Initialization

* Gazelle
2024-11-27 10:24:03 +01:00
Nishant Das
0c917079c4 Fix CI in PeerDAS (#14347)
* Update go.yml

* Disable mnd

* Update .golangci.yml

* Update go.yml

* Update go.yml

* Update .golangci.yml

* Update go.yml

* Fix Lint Issues

* Remove comment

* Update .golangci.yml
2024-11-27 10:24:03 +01:00
Manu NALEPA
a732fe7021 Implement /eth/v1/beacon/blob_sidecars/{block_id} for peerDAS. (#14312)
* `parseIndices`: `O(n**2)` ==> `O(n)`.

* PeerDAS: Implement `/eth/v1/beacon/blob_sidecars/{block_id}`.

* Update beacon-chain/core/peerdas/helpers.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Rename some functions.

* `Blobs`: Fix empty slice.

* `recoverCellsAndProofs` --> Move function in `beacon-chain/core/peerdas`.

* peerDAS helpers: Add missing tests.

* Implement `CustodyColumnCount`.

* `RecoverCellsAndProofs`: Remove useless argument `columnsCount`.

* Tests: Add cleanups.

* `blobsFromStoredDataColumns`: Reconstruct if needed.

* Make deepsource happy.

* Beacon API: Use provided indices.

* Make deepsource happier.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-11-27 10:24:03 +01:00
Nishant Das
d75a7aae6a Add Data Column Verification (#14287)
* Persist All Changes

* Fix All Tests

* Fix Build

* Fix Build

* Fix Build

* Fix Test Again

* Add missing verification

* Add Test Cases for Data Column Validation

* Fix comments for methods

* Fix comments for methods

* Fix Test

* Manu's Review
2024-11-27 10:24:03 +01:00
Manu NALEPA
e788a46e82 PeerDAS: Add MetadataV3 with custody_subnet_count (#14274)
* `sendPingRequest`: Add some comments.

* `sendPingRequest`: Replace `stream.Conn().RemotePeer()` by `peerID`.

* `pingHandler`: Add comments.

* `sendMetaDataRequest`: Add comments and implement an unique test.

* Gather `SchemaVersion`s in the same `const` definition.

* Define `SchemaVersionV3`.

* `MetaDataV1`: Fix comment.

* Proto: Define `MetaDataV2`.

* `MetaDataV2`: Generate SSZ.

* `newColumnSubnetIDs`: Use smaller lines.

* `metaDataHandler` and `sendMetaDataRequest`: Manage `MetaDataV2`.

* `RefreshPersistentSubnets`: Refactor tests (no functional change).

* `RefreshPersistentSubnets`: Refactor and add comments (no functional change).

* `RefreshPersistentSubnets`: Compare cache with both ENR & metadata.

* `RefreshPersistentSubnets`: Manage peerDAS.

* `registerRPCHandlersPeerDAS`: Register `RPCMetaDataTopicV3`.

* `CustodyCountFromRemotePeer`: Retrieve the count from metadata.

Then default to ENR, then default to the default value.

* Update beacon-chain/sync/rpc_metadata.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Fix duplicate case.

* Remove version testing.

* `debug.proto`: Stop breaking ordering.

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-11-27 10:24:03 +01:00
Manu NALEPA
199543125a Fix data columns sampling (#14263)
* Fix the obvious...

* Data columns sampling: Modify logging.

* `waitForChainStart`: Set it threadsafe - Do only wait once.

* Sampling: Wait for chain start before running the sampling.

Reason: `newDataColumnSampler1D` needs `s.ctxMap`.
`s.ctxMap` is only set once the chain has started.

Previously, `waitForChainStart` was only called in `s.registerHandlers`, itself called in a goroutine.

==> We had a race condition here: sometimes `newDataColumnSampler1D` was called after `s.ctxMap` was set, sometimes not.

* Address Nishant's comments.

* Sampling: Improve logging.

* `waitForChainStart`: Remove `chainIsStarted` check.
2024-11-27 10:19:07 +01:00
Manu NALEPA
ca63efa770 PeerDAS: Fix initial sync (#14208)
* `SendDataColumnsByRangeRequest`: Add some new fields in logs.

* `BlobStorageSummary`: Implement `HasDataColumnIndex` and `AllDataColumnsAvailable`.

* Implement `fetchDataColumnsFromPeers`.

* `fetchBlobsFromPeer`: Return only one error.
2024-11-27 10:19:07 +01:00
Manu NALEPA
345e6edd9c Make deepsource happy (#14237)
* DeepSource: Pass heavy objects by pointers.

* `removeBlockFromQueue`: Remove redundant error checking.

* `fetchBlobsFromPeer`: Use same variable for `append`.

* Remove unused arguments.

* Combine types.

* `Persist`: Add documentation.

* Remove unused receiver

* Remove duplicated import.

* Stop using both pointer and value receiver at the same time.

* `verifyAndPopulateColumns`: Remove unused parameter

* Stop using an empty slice literal to declare a variable.
2024-11-27 10:19:07 +01:00
Manu NALEPA
6403064126 PeerDAS: Run reconstruction in parallel. (#14236)
* PeerDAS: Run reconstruction in parallel.

* `isDataAvailableDataColumns` --> `isDataColumnsAvailable`

* `isDataColumnsAvailable`: Return `nil` as soon as half of the columns are received.

* Make deepsource happy.
2024-11-27 10:19:07 +01:00
Justin Traglia
0517d76631 Update ckzg4844 to latest version of das branch (#14223)
* Update ckzg4844 to latest version

* Run go mod tidy

* Remove unnecessary tests & run goimports

* Remove fieldparams from blockchain/kzg

* Add back blank line

* Avoid large copies

* Run gazelle

* Use trusted setup from the specs & fix issue with struct

* Run goimports

* Fix mistake in makeCellsAndProofs

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:19:07 +01:00
Nishant Das
000d480f77 Add Current Changes (#14231) 2024-11-27 10:19:07 +01:00
Manu NALEPA
b40a8ed37e Implement and use filterPeerForDataColumnsSubnet. (#14230) 2024-11-27 10:19:07 +01:00
Francis Li
d21c2bd63e [PeerDAS] Parallelize data column sampling (#14105)
* PeerDAS: parallelizing sample queries

* PeerDAS: select sample from non custodied columns

* Finish rebase

* Add more test cases
2024-11-27 10:19:07 +01:00
kevaundray
7a256e93f7 chore!: Use RecoverCellsAndKZGProofs instead of RecoverAllCells -> CellsToBlob -> ComputeCellsAndKZGProofs (#14183)
* use recoverCellsAndKZGProofs

* make recoverAllCells and CellsToBlob private

* chore: all methods now return CellsAndProof struct

* chore: update code
2024-11-27 10:19:07 +01:00
Nishant Das
07fe76c2da Trigger PeerDAS At Deneb For E2E (#14193)
* Trigger At Deneb

* Fix Rate Limits
2024-11-27 10:19:07 +01:00
Manu NALEPA
54affa897f PeerDAS: Add KZG verification when sampling (#14187)
* `validateDataColumn`: Add comments and remove debug computation.

* `sampleDataColumnsFromPeer`: Add KZG verification

* `VerifyKZGInclusionProofColumn`: Add unit test.

* Make deepsource happy.

* Address Nishant's comment.

* Address Nishant's comment.
2024-11-27 10:16:50 +01:00
kevaundray
ac4c5fae3c chore!: Make Cell be a flat sequence of bytes (#14159)
* chore: move all ckzg related functionality into kzg package

* refactor code to match

* run: bazel run //:gazelle -- fix

* chore: add some docs and stop copying large objects when converting between types

* fixes

* manually add kzg.go dep to Build.Hazel

* move kzg methods to kzg.go

* chore: add RecoverCellsAndProofs method

* bazel run //:gazelle -- fix

* make Cells be flattened sequence of bytes

* chore: add test for flattening roundtrip

* chore: remove code that was doing the flattening outside of the kzg package

* fix merge

* fix

* remove now un-needed conversion

* use pointers for Cell parameters

* linter

* rename cell conversion methods (this only applies to old version of c-kzg)
2024-11-27 10:16:50 +01:00
Manu NALEPA
2845d87077 Move log from error to debug. (#14194)
Reason: If a peer does not expose its `csc` field in its ENR,
then there is nothing we can do.
2024-11-27 10:16:50 +01:00
Nishant Das
dc2c90b8ed Activate PeerDAS with the EIP7594 Fork Epoch (#14184)
* Save All the Current Changes

* Add check for data sampling

* Fix Test

* Gazelle

* Manu's Review

* Fix Test
2024-11-27 10:16:50 +01:00
kevaundray
b469157e1f chore!: Refactor RecoverBlob to RecoverCellsAndProofs (#14160)
* change recoverBlobs to recoverCellsAndProofs

* modify code to take in the cells and proofs for a particular blob instead of the blob itself

* add CellsAndProofs structure

* modify recoverCellsAndProofs to return `cellsAndProofs` structure

* modify `DataColumnSidecarsForReconstruct` to accept the `cellsAndKZGProofs` structure

* bazel run //:gazelle -- fix

* use kzg abstraction for kzg method

* move CellsAndProofs to kzg.go
2024-11-27 10:16:50 +01:00
kevaundray
2697794e58 chore: Encapsulate all kzg functionality for PeerDAS into the kzg package (#14136)
* chore: move all ckzg related functionality into kzg package

* refactor code to match

* run: bazel run //:gazelle -- fix

* chore: add some docs and stop copying large objects when converting between types

* fixes

* manually add kzg.go dep to Build.Hazel

* move kzg methods to kzg.go

* chore: add RecoverCellsAndProofs method

* bazel run //:gazelle -- fix

* use BytesPerBlob constant

* chore: fix some deepsource issues

* one declaration for commans and blobs
2024-11-27 10:16:50 +01:00
Manu NALEPA
48cf24edb4 PeerDAS: Implement IncrementalDAS (#14109)
* `ConvertPeerIDToNodeID`: Add tests.

* Remove `extractNodeID` and uses `ConvertPeerIDToNodeID` instead.

* Implement IncrementalDAS.

* `DataColumnSamplingLoop` ==> `DataColumnSamplingRoutine`.

* HypergeomCDF: Add test.

* `GetValidCustodyPeers`: Optimize and add tests.

* Remove blank identifiers.

* Implement `CustodyCountFromRecord`.

* Implement `TestP2P.CustodyCountFromRemotePeer`.

* `NewTestP2P`: Add `swarmt.Option` parameters.

* `incrementalDAS`: Rework and add tests.

* Remove useless warning.
2024-11-27 10:16:50 +01:00
Francis Li
78f90db90b PeerDAS: add data column batch config (#14122) 2024-11-27 10:15:27 +01:00
Francis Li
d0a3b9bc1d [PeerDAS] rework ENR custody_subnet_count and add tests (#14077)
* [PeerDAS] rework ENR custody_subnet_count related code

* update according to proposed spec change

* Run gazelle
2024-11-27 10:15:27 +01:00
Manu NALEPA
bfdb6dab86 Fix columns sampling (#14118) 2024-11-27 10:15:27 +01:00
Francis Li
7dd2fd52af [PeerDAS] implement DataColumnSidecarsByRootReq and fix related bugs (#14103)
* [PeerDAS] add data column related protos and fix data column by root bug

* Add more tests
2024-11-27 10:15:27 +01:00
Francis Li
b6bad9331b [PeerDAS] fixes and tests for gossiping out data columns (#14102)
* [PeerDAS] Minor fixes and tests for gossiping out data columns

* Fix metrics
2024-11-27 10:15:27 +01:00
Francis Li
6e2122085d [PeerDAS] rework ENR custody_subnet_count and add tests (#14077)
* [PeerDAS] rework ENR custody_subnet_count related code

* update according to proposed spec change

* Run gazelle
2024-11-27 10:15:27 +01:00
Manu NALEPA
7a847292aa PeerDAS: Stop generating new P2P private key at start. (#14099)
* `privKey`: Improve logs.

* peerDAS: Move functions in file. Add documentation.

* PeerDAS: Remove unused `ComputeExtendedMatrix` and `RecoverMatrix` functions.

* PeerDAS: Stop generating new P2P private key at start.

* Fix sammy' comment.
2024-11-27 10:15:27 +01:00
Manu NALEPA
81f4db0afa PeerDAS: Gossip the reconstructed columns (#14079)
* PeerDAS: Broadcast not seen via gossip but reconstructed data columns.

* Address Nishant's comment.
2024-11-27 10:15:27 +01:00
Manu NALEPA
a7dc2e6c8b PeerDAS: Only saved custodied columns even after reconstruction. (#14083) 2024-11-27 10:15:27 +01:00
Manu NALEPA
0a010b5088 recoverBlobs: Cover the 0 < blobsCount < fieldparams.MaxBlobsPerBlock case. (#14066)
* `recoverBlobs`: Cover the `0 < blobsCount < fieldparams.MaxBlobsPerBlock` case.

* Fix Nishant's comment.
2024-11-27 10:15:27 +01:00
Manu NALEPA
1e335e2cf2 PeerDAS: Withhold data on purpose. (#14076)
* Introduce hidden flag `data-columns-withhold-count`.

* Address Nishant's comment.
2024-11-27 10:15:27 +01:00
Manu NALEPA
42f4c0f14e PeerDAS: Implement / use data column feed from database. (#14062)
* Remove some `_` identifiers.

* Blob storage: Implement a notifier system for data columns.

* `dataColumnSidecarByRootRPCHandler`: Remove ugly `time.Sleep(100 * time.Millisecond)`.

* Address Nishant's comment.
2024-11-27 10:15:27 +01:00
Manu NALEPA
d3c12abe25 PeerDAS: Implement reconstruction. (#14036)
* Wrap errors, add logs.

* `missingColumnRequest`: Fix blobs <-> data columns mix.

* `ColumnIndices`: Return `map[uint64]bool` instead of `[fieldparams.NumberOfColumns]bool`.

* `DataColumnSidecars`: `interfaces.SignedBeaconBlock` ==> `interfaces.ReadOnlySignedBeaconBlock`.

We don't need any of the non read-only methods.

* Fix comments.

* `handleUnblidedBlock` ==> `handleUnblindedBlock`.

* `SaveDataColumn`: Move log from debug to trace.

If we attempted to save an already existing data column sidecar,
a debug log was printed.

This case could be quite common now with the data column reconstruction enabled.

* `sampling_data_columns.go` --> `data_columns_sampling.go`.

* Reconstruct data columns.
2024-11-27 10:15:27 +01:00
Nishant Das
b0ba05b4f4 Fix Custody Columns (#14021) 2024-11-27 10:15:27 +01:00
Nishant Das
e206506489 Disable Evaluators For E2E (#14019)
* Hack E2E

* Fix it For Real

* Gofmt

* Remove
2024-11-27 10:15:27 +01:00
Nishant Das
013cb28663 Request Data Columns When Fetching Pending Blocks (#14007)
* Support Data Columns For By Root Requests

* Revert Config Changes

* Fix Panic

* Fix Process Block

* Fix Flags

* Lint

* Support Checkpoint Sync

* Manu's Review

* Add Support For Columns in Remaining Methods

* Unmarshal Uncorrectly
2024-11-27 10:15:27 +01:00
Manu NALEPA
496914cb39 Fix CustodyColumns to comply with alpha-2 spectests. (#14008)
* Adding error wrapping

* Fix `CustodyColumnSubnets` tests.
2024-11-27 10:15:27 +01:00
Nishant Das
c032e78888 Set Custody Count Correctly (#14004)
* Set Custody Count Correctly

* Fix Discovery Count
2024-11-27 10:15:26 +01:00
Manu NALEPA
5e4deff6fd Sample from peers some data columns. (#13980)
* PeerDAS: Implement sampling.

* `TestNewRateLimiter`: Fix with the new number of expected registered topics.
2024-11-27 10:15:26 +01:00
Nishant Das
6daa91c465 Implement Data Columns By Range Request And Response Methods (#13972)
* Add Data Structure for New Request Type

* Add Data Column By Range Handler

* Add Data Column Request Methods

* Add new validation for columns by range requests

* Fix Build

* Allow Prysm Node To Fetch Data Columns

* Allow Prysm Node To Fetch Data Columns And Sync

* Bug Fixes For Interop

* GoFmt

* Use different var

* Manu's Review
2024-11-27 10:15:26 +01:00
Nishant Das
32ce6423eb Enable E2E For PeerDAS (#13945)
* Enable E2E And Add Fixes

* Register Same Topic For Data Columns

* Initialize Capacity Of Slice

* Fix Initialization of Data Column Receiver

* Remove Mix In From Merkle Proof

* E2E: Subscribe to all subnets.

* Remove Index Check

* Remaining Bug Fixes to Get It Working

* Change Evaluator to Allow Test to Finish

* Fix Build

* Add Data Column Verification

* Fix LoopVar Bug

* Do Not Allocate Memory

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/core/peerdas/helpers.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/core/peerdas/helpers.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Gofmt

* Fix It Again

* Fix Test Setup

* Fix Build

* Fix Trusted Setup panic

* Fix Trusted Setup panic

* Use New Test

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:15:26 +01:00
Justin Traglia
b0ea450df5 [PeerDAS] Upgrade c-kzg-4844 package (#13967)
* Upgrade c-kzg-4844 package

* Upgrade bazel deps
2024-11-27 10:15:26 +01:00
Manu NALEPA
8bd10df423 SendDataColumnSidecarByRoot: Return RODataColumn instead of ROBlob. (#13957)
* `SendDataColumnSidecarByRoot`: Return `RODataColumn` instead of `ROBlob`.

* Make deepsource happier.
2024-11-27 10:15:26 +01:00
Manu NALEPA
dcbb543be2 Spectests (#13940)
* Update `consensus_spec_version` to `v1.5.0-alpha.1`.

* `CustodyColumns`: Fix and implement spec tests.

* Make deepsource happy.

* `^uint64(0)` => `math.MaxUint64`.

* Fix `TestLoadConfigFile` test.
2024-11-27 10:15:26 +01:00
Nishant Das
be0580e1a9 Add DA Check For Data Columns (#13938)
* Add new DA check

* Exit early in the event no commitments exist.

* Gazelle

* Fix Mock Broadcaster

* Fix Test Setup

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Fix Build

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:15:26 +01:00
Manu NALEPA
1355178115 Implement peer DAS proposer RPC (#13922)
* Remove capital letter from error messages.

* `[4]byte` => `[fieldparams.VersionLength]byte`.

* Prometheus: Remove extra `committee`.

They are probably due to a bad copy/paste.

Note: The name of the probe itself is remaining,
to ensure backward compatibility.

* Implement Proposer RPC for data columns.

* Fix TestProposer_ProposeBlock_OK test.

* Remove default peerDAS activation.

* `validateDataColumn`: Workaround to return a `VerifiedRODataColumn`
2024-11-27 10:15:26 +01:00
Nishant Das
b78c3485b9 Update .bazelrc (#13931) 2024-11-27 10:15:26 +01:00
Manu NALEPA
f503efc6ed Implement custody_subnet_count ENR field. (#13915)
https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
2024-11-27 10:15:26 +01:00
Manu NALEPA
1bfbd3980e Peer das core (#13877)
* Bump `c-kzg-4844` lib to the `das` branch.

* Implement `MerkleProofKZGCommitments`.

* Implement `das-core.md`.

* Use `peerdas.CustodyColumnSubnets` and `peerdas.CustodyColumns`.

* `CustodyColumnSubnets`: Include `i` in the for loop.

* Remove `computeSubscribedColumnSubnet`.

* Remove `peerdas.CustodyColumns` out of the for loop.
2024-11-27 10:15:26 +01:00
Nishant Das
3e722ea1bc Add Request And Response RPC Methods For Data Columns (#13909)
* Add RPC Handler

* Add Column Requests

* Update beacon-chain/db/filesystem/blob.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/p2p/rpc_topic_mappings.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Manu's Review

* Interface Fixes

* mock manager

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-11-27 10:15:26 +01:00
Nishant Das
d844026433 Add Data Column Gossip Handlers (#13894)
* Add Data Column Subscriber

* Add Data Column Validator

* Wire all Handlers In

* Fix Build

* Fix Test

* Fix IP in Test

* Fix IP in Test
2024-11-27 10:15:26 +01:00
Nishant Das
9ffc19d5ef Add Support For Discovery Of Column Subnets (#13883)
* Add Support For Discovery Of Column Subnets

* Lint for SubnetsPerNode

* Manu's Review

* Change to a better name
2024-11-27 10:15:26 +01:00
Nishant Das
3e23f6e879 add it (#13865) 2024-11-27 10:11:55 +01:00
Manu NALEPA
c688c84393 Add in column sidecars protos (#13862) 2024-11-27 10:11:55 +01:00
109 changed files with 8110 additions and 1718 deletions

View File

@@ -6,20 +6,20 @@ import (
)
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
return nil
}
if len(sidecars) == 1 {
if len(blobSidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(sidecars[0].Blob),
bytesToCommitment(sidecars[0].KzgCommitment),
bytesToKZGProof(sidecars[0].KzgProof))
bytesToBlob(blobSidecars[0].Blob),
bytesToCommitment(blobSidecars[0].KzgCommitment),
bytesToKZGProof(blobSidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(sidecars))
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs := make([]GoKZG.Blob, len(blobSidecars))
cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
proofs := make([]GoKZG.KZGProof, len(blobSidecars))
for i, sidecar := range blobSidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)

View File

@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}
func TestVerify(t *testing.T) {
sidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(sidecars...))
blobSidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(blobSidecars...))
}
func TestBytesToAny(t *testing.T) {

View File

@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
if blockVersion >= version.Fulu {
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
}
return nil
}
if blockVersion >= version.Deneb {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
}
return nil
}
return nil
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -901,6 +926,118 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}
// areDataColumnsImmediatelyAvailable checks if all required data columns are currently
// available in the database without waiting for missing ones.
func (s *Service) areDataColumnsImmediatelyAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
// If block has no commitments there is nothing to check.
if len(kzgCommitments) == 0 {
return nil
}
// All columns to sample need to be available for the block to be considered available.
nodeID := s.cfg.P2P.NodeID()
// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count error")
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Compute the sampling size.
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}
// Get the count of data columns we already have in the store.
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
return nil
}
// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}
// If there are no missing indices, all data column sidecars are available.
if len(missingMap) == 0 {
return nil
}
// If any data is missing, return error immediately (don't wait)
missingIndices := uint64MapToSortedSlice(missingMap)
return fmt.Errorf("data columns not immediately available, missing %v", missingIndices)
}
// areBlobsImmediatelyAvailable checks if all required blobs are currently
// available in the database without waiting for missing ones.
func (s *Service) areBlobsImmediatelyAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// expected is the number of kzg commitments observed in the block.
expected := len(kzgCommitments)
if expected == 0 {
return nil
}
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
if err != nil {
return errors.Wrap(err, "missing indices")
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
return nil
}
// If any blobs are missing, return error immediately (don't wait)
missingIndices := uint64MapToSortedSlice(missing)
return fmt.Errorf("blobs not immediately available, missing %v", missingIndices)
}
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
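A minimal, self-contained sketch of the decision rule in areDataColumnsImmediatelyAvailable above. The constants and helper names below are illustrative assumptions (the real code derives the custody set from max(SamplesPerSlot, custodyGroupCount) custody groups and the threshold from peerdas.MinimumColumnCountToReconstruct):
package main

import (
	"fmt"
	"sort"
)

// Illustrative constants; the real values come from the beacon config.
const (
	numberOfColumns      = 128
	minimumToReconstruct = numberOfColumns / 2
)

// columnsImmediatelyAvailable mirrors the rule above: once enough columns are
// stored to reconstruct the remainder, the block counts as available;
// otherwise every custodied column must already be on disk.
func columnsImmediatelyAvailable(custody, stored map[uint64]bool) error {
	if len(stored) >= minimumToReconstruct {
		return nil
	}
	missing := make([]uint64, 0, len(custody))
	for idx := range custody {
		if !stored[idx] {
			missing = append(missing, idx)
		}
	}
	if len(missing) == 0 {
		return nil
	}
	sort.Slice(missing, func(i, j int) bool { return missing[i] < missing[j] })
	return fmt.Errorf("data columns not immediately available, missing %v", missing)
}

func main() {
	custody := map[uint64]bool{3: true, 40: true, 77: true}
	stored := map[uint64]bool{3: true, 77: true}
	fmt.Println(columnsImmediatelyAvailable(custody, stored)) // missing [40]
}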

View File

@@ -30,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -37,12 +38,22 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// DataAvailabilityChecker defines an interface for checking if data is available
// for a given block root. This interface is implemented by the blockchain service
// which has knowledge of the beacon chain's data availability requirements.
// Returns nil if data is available, ErrDataNotAvailable if data is not available,
// or another error for other failures.
type DataAvailabilityChecker interface {
IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error
}
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
@@ -106,25 +117,32 @@ type Checker interface {
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
// ErrDataNotAvailable is returned when block data is not immediately available for processing.
var ErrDataNotAvailable = errors.New("block data is not available")
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][]bool
// TODO: Separate blobs from data columns
// seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
return
}
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// if idx >= uint64(maxBlobsPerBlock) {
// return
// }
bn.Lock()
seen := bn.seenIndex[root]
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
// TODO: Separate blobs from data columns
// if seen == nil {
// seen = make([]bool, maxBlobsPerBlock)
// }
if seen[idx] {
bn.Unlock()
return
@@ -135,7 +153,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
@@ -145,12 +165,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c
@@ -176,7 +199,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,
@@ -518,6 +543,32 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
return earliestAvailableSlot, custodyGroupCount, nil
}
// IsDataAvailable implements the DataAvailabilityChecker interface for use by the execution service.
// It checks if all required blob and data column data is immediately available in the database without waiting.
func (s *Service) IsDataAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
blockVersion := block.Version()
if blockVersion >= version.Fulu {
if err := s.areDataColumnsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
return errors.Wrap(ErrDataNotAvailable, err.Error())
}
return nil
}
if blockVersion >= version.Deneb {
if err := s.areBlobsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
return errors.Wrap(ErrDataNotAvailable, err.Error())
}
}
return nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
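A compact sketch of the notifier pattern the TODO comments above describe: a per-root channel buffered for the worst case (every column index) and a fixed-size seen array so duplicate notifications are dropped. The type and field names are illustrative, not the actual service fields:
package main

import (
	"fmt"
	"sync"
)

const numberOfColumns = 128

type notifier struct {
	mu        sync.Mutex
	notifiers map[[32]byte]chan uint64
	seen      map[[32]byte][numberOfColumns]bool
}

func newNotifier() *notifier {
	return &notifier{
		notifiers: make(map[[32]byte]chan uint64),
		seen:      make(map[[32]byte][numberOfColumns]bool),
	}
}

// notifyIndex sends idx on the root's channel exactly once; later duplicates
// are ignored thanks to the seen array, and the channel is buffered for the
// maximum possible number of indices so senders never block.
func (n *notifier) notifyIndex(root [32]byte, idx uint64) {
	if idx >= numberOfColumns {
		return
	}
	n.mu.Lock()
	seen := n.seen[root]
	if seen[idx] {
		n.mu.Unlock()
		return
	}
	seen[idx] = true
	n.seen[root] = seen
	c, ok := n.notifiers[root]
	if !ok {
		c = make(chan uint64, numberOfColumns)
		n.notifiers[root] = c
	}
	n.mu.Unlock()
	c <- idx
}

func main() {
	n := newNotifier()
	root := [32]byte{1}
	n.notifyIndex(root, 7)
	n.notifyIndex(root, 7)              // dropped as a duplicate
	fmt.Println(len(n.notifiers[root])) // 1
}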

View File

@@ -554,7 +554,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}

View File

@@ -732,6 +732,11 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
return c.TargetRoot, nil
}
// IsDataAvailable implements the data availability checker interface for testing
func (c *ChainService) IsDataAvailable(_ context.Context, _ [32]byte, _ interfaces.ReadOnlySignedBeaconBlock) error {
return nil
}
// MockSyncChecker is a mock implementation of blockchain.Checker.
// We can't make an assertion here that this is true because that would create a circular dependency.
type MockSyncChecker struct {

View File

@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
@@ -13,7 +12,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -33,7 +30,6 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
],
@@ -49,7 +45,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
return nil
}
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
if len(blobSidecars) > 1 {
firstRoot := blobSidecars[0].BlockRoot()
for _, sidecar := range blobSidecars[1:] {
if len(sidecars) > 1 {
firstRoot := sidecars[0].BlockRoot()
for _, sidecar := range sidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromSidecar(blobSidecars[0])
key := keyFromSidecar(sidecars[0])
entry := s.cache.ensure(key)
for _, blobSidecar := range blobSidecars {
for _, blobSidecar := range sidecars {
if err := entry.stash(&blobSidecar); err != nil {
return err
}
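The Persist path above drops sidecars that fall outside the retention window before caching anything. A freestanding sketch of that guard, with an assumed retention constant standing in for MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS:
package main

import "fmt"

const (
	slotsPerEpoch   = 32
	retentionEpochs = 4096 // assumed stand-in for MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
)

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// withinRetention reports whether a sidecar from sidecarSlot still has to be
// checked for availability at currentSlot, mirroring the WithinDAPeriod guard.
func withinRetention(sidecarSlot, currentSlot uint64) bool {
	sidecarEpoch, currentEpoch := toEpoch(sidecarSlot), toEpoch(currentSlot)
	return sidecarEpoch+retentionEpochs >= currentEpoch
}

func main() {
	fmt.Println(withinRetention(0, 32))                                // true: same window
	fmt.Println(withinRetention(0, (retentionEpochs+2)*slotsPerEpoch)) // false: too old
}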

View File

@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[2]))
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All persisted, return nil
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, more[0]))
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, more...))
require.NoError(t, as.Persist(1, moreBlobSidecars...))
}
type mockBlobBatchVerifier struct {

View File

@@ -1,213 +0,0 @@
package das
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
)
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
if len(sidecars) == 0 {
return nil
}
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
// It is safe to retrieve the first sidecar.
firstSidecar := dataColumnSidecars[0]
if len(sidecars) > 1 {
firstRoot := firstSidecar.BlockRoot()
for _, sidecar := range dataColumnSidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
return nil
}
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
entry := s.cache.ensure(key)
for _, sidecar := range dataColumnSidecars {
if err := entry.stash(&sidecar); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
}
// Return early for blocks that do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
// Get the root of the block.
blockRoot := block.Root()
// Build the cache key for the block.
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
entry := s.cache.ensure(key)
// Delete the cache entry for the block at the end.
defer s.cache.delete(key)
// Set the disk summary for the block in the cache entry.
entry.setDiskSummary(s.store.Summary(blockRoot))
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
if err != nil {
return errors.Wrap(err, "entry filter")
}
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "sidecar KZG proof verified")
}
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return errors.Wrap(err, "verified RO data columns - should never happen")
}
if err := s.store.Save(verifiedRoDataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
return nil
}
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
}
// Compute the block epoch.
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)
// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return &safeCommitmentsArray{}, nil
}
// Retrieve the KZG commitments for the block.
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Return early if there are no commitments in the block.
if len(kzgCommitments) == 0 {
return &safeCommitmentsArray{}, nil
}
// Retrieve peer info.
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
// Create a safe commitments array for the custody columns.
commitmentsArray := &safeCommitmentsArray{}
commitmentsArraySize := uint64(len(commitmentsArray))
for column := range peerInfo.CustodyColumns {
if column >= commitmentsArraySize {
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
}
commitmentsArray[column] = kzgCommitments
}
return commitmentsArray, nil
}
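For reference, a simplified sketch of what the deleted fullCommitmentsToCheck helper above computed: the block's KZG commitments are expected at every column index the node custodies, and all other entries stay empty. Types and names are simplified assumptions, not the package's real ones:
package main

import "fmt"

const numberOfColumns = 128

// commitmentsPerColumn places the block's KZG commitments at every custodied
// column index; columns the node does not custody stay nil and are skipped
// during the availability check.
func commitmentsPerColumn(custodyColumns map[uint64]bool, kzgCommitments [][]byte) ([numberOfColumns][][]byte, error) {
	var out [numberOfColumns][][]byte
	if len(kzgCommitments) == 0 {
		return out, nil // nothing to check for blocks without blobs
	}
	for column := range custodyColumns {
		if column >= numberOfColumns {
			return out, fmt.Errorf("custody column index %d out of range (array size %d)", column, numberOfColumns)
		}
		out[column] = kzgCommitments
	}
	return out, nil
}

func main() {
	commitments := [][]byte{[]byte("a"), []byte("b")}
	arr, err := commitmentsPerColumn(map[uint64]bool{3: true, 90: true}, commitments)
	fmt.Println(err, len(arr[3]), len(arr[4])) // <nil> 2 0
}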

View File

@@ -1,313 +0,0 @@
package das
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
)
var commitments = [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("mixed roots", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
{Slot: 2, Index: 2},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("outside DA period", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("nominal", func(t *testing.T) {
const slot = 42
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: slot, Index: 1},
{Slot: slot, Index: 5},
}
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
require.Equal(t, true, ok)
// A call to Persist does NOT save the sidecars to disk.
require.Equal(t, uint64(0), entry.diskSummary.Count())
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
for i, roDataColumn := range entry.scs {
if map[int]bool{1: true, 5: true}[i] {
continue
}
require.IsNil(t, roDataColumn)
}
})
}
func TestIsDataAvailable(t *testing.T) {
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
}
ctx := t.Context()
t.Run("without commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
})
t.Run("with commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
block := signedRoBlock.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
parentRoot := block.ParentRoot()
stateRoot := block.StateRoot()
bodyRoot, err := block.Body().HashTreeRoot()
require.NoError(t, err)
root := signedRoBlock.Root()
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
Index: index,
KzgCommitments: commitments,
Slot: slot,
ProposerIndex: proposerIndex,
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
}
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
}
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
key := cacheKey{root: root}
entry := lazilyPersistentStoreColumns.cache.ensure(key)
defer lazilyPersistentStoreColumns.cache.delete(key)
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
require.NoError(t, err)
}
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
require.NoError(t, err)
actual, err := dataColumnStorage.Get(root, indices[:])
require.NoError(t, err)
summary := dataColumnStorage.Summary(root)
require.Equal(t, uint64(len(indices)), summary.Count())
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
})
}
func TestFullCommitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
require.NoError(t, err)
testCases := []struct {
name string
commitments [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
}{
{
name: "Pre-Fulu block",
block: func(t *testing.T) blocks.ROBlock {
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
},
},
{
name: "Commitments outside data availability window",
block: func(t *testing.T) blocks.ROBlock {
beaconBlockElectra := util.NewBeaconBlockElectra()
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
return newSignedRoBlock(t, beaconBlockElectra)
},
slot: windowSlots + 1,
},
{
name: "Commitments within data availability window",
block: func(t *testing.T) blocks.ROBlock {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedBeaconBlockFulu.Block.Slot = 100
return newSignedRoBlock(t, signedBeaconBlockFulu)
},
commitments: commitments,
slot: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)
for _, commitments := range commitmentsArray {
require.DeepEqual(t, tc.commitments, commitments)
}
})
}
}
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
for _, roDataColumn := range roDataColumns {
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
}
return roSidecars, roDataColumns
}
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
}
type mockDataColumnsVerifier struct {
t *testing.T
dataColumnSidecars []blocks.RODataColumn
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
for _, dataColumnSidecar := range m.dataColumnSidecars {
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
}
return verifiedDataColumnSidecars, nil
}
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
func (m *mockDataColumnsVerifier) ValidFields() error {
m.validCalled = true
return nil
}
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
m.SidecarInclusionProvenCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
m.SidecarKzgProofVerifiedCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }

View File

@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}

View File

@@ -5,13 +5,12 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
errors "github.com/pkg/errors"
)
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
var _ AvailabilityStore = &MockAvailabilityStore{}
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
if m.PersistBlobsCallback != nil {
return m.PersistBlobsCallback(current, blobSidecars...)
return m.PersistBlobsCallback(current, blobSidecar...)
}
return nil
}

View File

@@ -100,6 +100,14 @@ type (
}
)
// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
type DataColumnStorageReader interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
}
var _ DataColumnStorageReader = &DataColumnStorage{}
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
return func(b *DataColumnStorage) error {
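A small, hypothetical consumer of the DataColumnStorageReader shape added above: given any reader, report which requested indices are not yet stored for a root. The concrete types here are stand-ins, not the filesystem package's own:
package main

import "fmt"

// summary is a stand-in for DataColumnStorageSummary; only the method used
// below is modeled.
type summary struct{ stored map[uint64]bool }

func (s summary) Stored() map[uint64]bool { return s.stored }

// reader follows the shape of the DataColumnStorageReader interface above,
// trimmed to what this example needs.
type reader interface {
	Summary(root [32]byte) summary
}

type memStore struct{ byRoot map[[32]byte]summary }

func (m memStore) Summary(root [32]byte) summary { return m.byRoot[root] }

// missingIndices is a hypothetical helper: given any reader, it reports the
// requested indices that are not yet on disk for the root.
func missingIndices(r reader, root [32]byte, want []uint64) []uint64 {
	stored := r.Summary(root).Stored()
	var missing []uint64
	for _, idx := range want {
		if !stored[idx] {
			missing = append(missing, idx)
		}
	}
	return missing
}

func main() {
	root := [32]byte{1}
	store := memStore{byRoot: map[[32]byte]summary{root: {stored: map[uint64]bool{4: true}}}}
	fmt.Println(missingIndices(store, root, []uint64{4, 9})) // [9]
}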

View File

@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
return stored
}
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
}
type dataColumnStorageSummaryCache struct {
mu sync.RWMutex
dataColumnCount float64
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
return &dataColumnStorageSummaryCache{
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),

View File

@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
c := newDataColumnStorageSummaryCache()
for root, indices := range set {
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
t.Fatal(err)
}
}
return c
}

View File

@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// Support for checkpoint sync and backfill.
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
// Custody operations.
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlockRoot() ([32]byte, error)
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// Support for checkpoint sync and backfill.
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

View File

@@ -74,6 +74,7 @@ go_library(
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_sync//singleflight:go_default_library",
],
)
@@ -84,6 +85,7 @@ go_test(
"block_cache_test.go",
"block_reader_test.go",
"deposit_test.go",
"engine_client_broadcast_test.go",
"engine_client_fuzz_test.go",
"engine_client_test.go",
"execution_chain_test.go",

View File

@@ -99,6 +99,8 @@ const (
GetBlobsV2 = "engine_getBlobsV2"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
// defaultGetBlobsRetryInterval is the default retry interval for getBlobsV2 calls.
defaultGetBlobsRetryInterval = 200 * time.Millisecond
)
var (
@@ -652,9 +654,94 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
}
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
// It uses singleflight to ensure only one reconstruction per blockRoot.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
// Use singleflight to ensure only one reconstruction per blockRoot
v, err, _ := s.reconstructSingleflight.Do(fmt.Sprintf("%x", blockRoot), func() (interface{}, error) {
// Try reconstruction once
result, err := s.reconstructDataColumnSidecarsOnce(ctx, signedROBlock, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "failed to reconstruct data column sidecars")
}
if len(result) > 0 {
return result, nil // Success - return data
}
// Empty result - initiate retry mechanism
// Create a new context with a timeout for the retry goroutine.
retryCtx, cancel := context.WithTimeout(s.ctx, time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
// LoadOrStore atomically checks for an existing retry and stores
// a new one if none exists. This prevents a race condition.
// The stored value is the cancel function for the new context.
_, loaded := s.activeRetries.LoadOrStore(blockRoot, cancel)
if loaded {
// Another goroutine already started the retry process. The current one can exit.
cancel() // Cancel the context we just created as it won't be used.
return []blocks.VerifiedRODataColumn{}, nil
}
// This goroutine is now responsible for starting the retry.
// Perform periodic retry attempts for data column reconstruction inline.
go func() {
startTime := time.Now()
// Defer the cancellation of the context and the removal of the active retry tracker.
defer func() {
cancel()
s.activeRetries.Delete(blockRoot)
}()
ticker := time.NewTicker(defaultGetBlobsRetryInterval)
defer ticker.Stop()
attemptCount := 0
retryLog := log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot))
for {
select {
case <-ticker.C:
attemptCount++
getBlobsRetryAttempts.WithLabelValues("attempt").Inc()
// Retry reconstruction
retryLog.WithField("attempt", attemptCount).Debug("Retrying data column reconstruction")
result, err := s.reconstructDataColumnSidecarsOnce(retryCtx, signedROBlock, blockRoot)
if err != nil {
retryLog.WithError(err).Debug("Reconstruction attempt failed, will retry")
continue
}
if len(result) > 0 {
retryLog.WithField("attempts", attemptCount).Debug("Retry succeeded")
getBlobsRetryAttempts.WithLabelValues("success_reconstructed").Inc()
getBlobsRetryDuration.WithLabelValues("success").Observe(time.Since(startTime).Seconds())
// Clean up active retry tracker immediately on success
s.activeRetries.Delete(blockRoot)
return
}
case <-retryCtx.Done():
retryLog.WithField("attempts", attemptCount).Debug("Retry timeout")
getBlobsRetryAttempts.WithLabelValues("timeout").Inc()
getBlobsRetryDuration.WithLabelValues("timeout").Observe(time.Since(startTime).Seconds())
return
}
}
}()
// Return empty result for now; the background retry will handle it.
return []blocks.VerifiedRODataColumn{}, nil
})
if err != nil {
return nil, err
}
return v.([]blocks.VerifiedRODataColumn), nil
}
// reconstructDataColumnSidecarsOnce performs a single attempt to reconstruct data column sidecars.
func (s *Service) reconstructDataColumnSidecarsOnce(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
block := signedROBlock.Block()
log := log.WithFields(logrus.Fields{
@@ -1008,6 +1095,12 @@ func toBlockNumArg(number *big.Int) string {
return hexutil.EncodeBig(number)
}
// hasActiveRetry checks if there's an active retry for the given block root.
func (s *Service) hasActiveRetry(blockRoot [fieldparams.RootLength]byte) bool {
_, exists := s.activeRetries.Load(blockRoot)
return exists
}
// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
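The retry flow above combines two mechanisms that the sketch below isolates with a dummy work function: singleflight.Group collapses concurrent callers for the same key into a single execution, and a sync.Map of cancel functions guarantees at most one background retry loop per key. All names, intervals, and the timeout are illustrative:
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

type retrier struct {
	group         singleflight.Group
	activeRetries sync.Map // key -> context.CancelFunc
}

// fetch runs work once per key no matter how many goroutines call it
// concurrently; if the work comes back empty it starts (at most) one
// background retry loop for that key.
func (r *retrier) fetch(key string, work func(context.Context) []string) []string {
	v, _, _ := r.group.Do(key, func() (interface{}, error) {
		if result := work(context.Background()); len(result) > 0 {
			return result, nil
		}
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		if _, loaded := r.activeRetries.LoadOrStore(key, cancel); loaded {
			cancel() // another goroutine already owns the retry for this key
			return []string{}, nil
		}
		go func() {
			defer func() { cancel(); r.activeRetries.Delete(key) }()
			ticker := time.NewTicker(200 * time.Millisecond)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					if result := work(ctx); len(result) > 0 {
						fmt.Println("retry succeeded for", key, result)
						return
					}
				case <-ctx.Done():
					return
				}
			}
		}()
		return []string{}, nil
	})
	return v.([]string)
}

func main() {
	r := &retrier{}
	calls := 0
	work := func(context.Context) []string {
		calls++
		if calls < 3 {
			return nil
		}
		return []string{"data"}
	}
	fmt.Println(r.fetch("block-root", work)) // empty on the first pass
	time.Sleep(time.Second)                  // let the background retry run
}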

View File

@@ -0,0 +1,92 @@
package execution
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
// TestStartRetryIfNeeded_AtomicBehavior tests that the atomic retry start behavior
// prevents race conditions by ensuring only one retry can be active per blockRoot.
func TestStartRetryIfNeeded_AtomicBehavior(t *testing.T) {
t.Run("prevents multiple concurrent retry claims", func(t *testing.T) {
service := &Service{
activeRetries: sync.Map{},
}
blockRoot := [32]byte{1, 2, 3}
claimCount := int64(0)
numConcurrentCalls := 20
var wg sync.WaitGroup
startSignal := make(chan struct{})
// Launch multiple goroutines that try to claim retry slot simultaneously
for i := 0; i < numConcurrentCalls; i++ {
wg.Add(1)
go func() {
defer wg.Done()
<-startSignal // Wait for signal to maximize race contention
// Simulate the atomic claim logic from startRetryIfNeeded
cancelFunc := func() {}
if _, loaded := service.activeRetries.LoadOrStore(blockRoot, cancelFunc); !loaded {
// We won the race - count successful claims
atomic.AddInt64(&claimCount, 1)
// Simulate some work before cleaning up
time.Sleep(1 * time.Millisecond)
service.activeRetries.Delete(blockRoot)
}
}()
}
// Start all goroutines simultaneously to maximize race condition
close(startSignal)
wg.Wait()
// Verify only one goroutine successfully claimed the retry slot
actualClaimCount := atomic.LoadInt64(&claimCount)
require.Equal(t, int64(1), actualClaimCount, "Only one goroutine should successfully claim retry slot despite %d concurrent attempts", numConcurrentCalls)
t.Logf("Success: %d concurrent attempts resulted in only 1 successful claim (atomic behavior verified)", numConcurrentCalls)
})
t.Run("hasActiveRetry correctly detects active retries", func(t *testing.T) {
service := &Service{
activeRetries: sync.Map{},
}
blockRoot1 := [32]byte{1, 2, 3}
blockRoot2 := [32]byte{4, 5, 6}
// Initially no active retries
if service.hasActiveRetry(blockRoot1) {
t.Error("Should not have active retry initially")
}
// Add active retry for blockRoot1
service.activeRetries.Store(blockRoot1, func() {})
// Verify detection
if !service.hasActiveRetry(blockRoot1) {
t.Error("Should detect active retry for blockRoot1")
}
if service.hasActiveRetry(blockRoot2) {
t.Error("Should not detect active retry for blockRoot2")
}
// Remove active retry
service.activeRetries.Delete(blockRoot1)
// Verify removal
if service.hasActiveRetry(blockRoot1) {
t.Error("Should not detect active retry after deletion")
}
t.Logf("Success: hasActiveRetry correctly tracks retry state")
})
}
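The guarantee the test above leans on is simply that sync.Map.LoadOrStore reports loaded == false for exactly one caller per key. A stripped-down version of that race:
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var m sync.Map
	var wins int64
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Only the first caller to store the key sees loaded == false.
			if _, loaded := m.LoadOrStore("block-root", struct{}{}); !loaded {
				atomic.AddInt64(&wins, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(wins) // 1
}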

View File

@@ -11,7 +11,10 @@ import (
"net/http"
"net/http/httptest"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
@@ -2723,3 +2726,412 @@ func testNewBlobVerifier() verification.NewBlobVerifier {
}
}
}
// Test retry helper methods
func TestRetryHelperMethods(t *testing.T) {
client := &Service{}
blockRoot := [32]byte{1, 2, 3}
t.Run("hasActiveRetry returns false initially", func(t *testing.T) {
hasActive := client.hasActiveRetry(blockRoot)
require.Equal(t, false, hasActive)
})
t.Run("hasActiveRetry returns true after storing cancel function", func(t *testing.T) {
_, cancel := context.WithCancel(context.Background())
defer cancel()
client.activeRetries.Store(blockRoot, cancel)
hasActive := client.hasActiveRetry(blockRoot)
require.Equal(t, true, hasActive)
// Clean up
client.activeRetries.Delete(blockRoot)
})
}
// Test ReconstructDataColumnSidecars with retry logic
func TestReconstructDataColumnSidecars_WithRetry(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Setup test config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
// Create test block
kzgCommitments := createRandomKzgCommitments(t, 3)
sb := util.NewBeaconBlockFulu()
sb.Block.Body.BlobKzgCommitments = kzgCommitments
signedB, err := blocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
r := [32]byte{1, 2, 3}
t.Run("successful initial call does not trigger retry", func(t *testing.T) {
ctx := context.Background()
// Setup server that returns all blobs
blobMasks := []bool{true, true, true}
srv := createBlobServerV2(t, 3, blobMasks)
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
// Should not have any active retries since initial call succeeded
require.Equal(t, false, client.hasActiveRetry(r))
})
t.Run("failed initial call triggers retry", func(t *testing.T) {
ctx := context.Background()
// Setup server that returns no blobs
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
// Wait a bit for the goroutine to start
time.Sleep(10 * time.Millisecond)
// Should have active retry since initial call returned empty
require.Equal(t, true, client.hasActiveRetry(r))
// Clean up
if cancel, ok := client.activeRetries.Load(r); ok {
cancel.(context.CancelFunc)()
}
})
t.Run("does not start duplicate retry", func(t *testing.T) {
ctx := context.Background()
// Setup server that returns no blobs
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// First call should start retry
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
// Wait a bit for the goroutine to start
time.Sleep(10 * time.Millisecond)
require.Equal(t, true, client.hasActiveRetry(r))
// Second call should not start another retry
dataColumns, err = client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
require.Equal(t, true, client.hasActiveRetry(r))
// Clean up
if cancel, ok := client.activeRetries.Load(r); ok {
cancel.(context.CancelFunc)()
}
})
}
// Test timeout and cleanup behavior
func TestRetryTimeout(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Setup test config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
// Create test block
kzgCommitments := createRandomKzgCommitments(t, 1)
sb := util.NewBeaconBlockFulu()
sb.Block.Body.BlobKzgCommitments = kzgCommitments
signedB, err := blocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
r := [32]byte{1, 2, 3}
t.Run("retry cleans up after timeout", func(t *testing.T) {
// Setup server that always returns no blobs
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// Modify config to have very short slot time for testing
originalConfig := params.BeaconConfig()
cfg := originalConfig.Copy()
cfg.SecondsPerSlot = 1 // 1 second timeout for retry
params.OverrideBeaconConfig(cfg)
defer params.OverrideBeaconConfig(originalConfig)
// Call ReconstructDataColumnSidecars which will start retry internally
ctx := context.Background()
_, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err) // Should not error, just return empty result
// Wait a bit for the retry goroutine to start
time.Sleep(10 * time.Millisecond)
// Should have active retry initially
require.Equal(t, true, client.hasActiveRetry(r))
// Wait for timeout (longer than the 1 second timeout we set)
time.Sleep(1200 * time.Millisecond)
// Should be cleaned up after timeout
require.Equal(t, false, client.hasActiveRetry(r))
})
}
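// Illustrative sketch of the behavior exercised above (an assumption about the retry
// loop's shape, not the actual implementation): a retry goroutine bounded by a timeout
// derived from SecondsPerSlot that deregisters itself from activeRetries when done.
// The runBoundedRetry name and the 100ms polling interval are hypothetical.
func runBoundedRetry(s *Service, blockRoot [32]byte, attempt func(context.Context) bool) {
	timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	s.activeRetries.Store(blockRoot, cancel)
	defer func() {
		cancel()
		s.activeRetries.Delete(blockRoot)
	}()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			// Timed out (or cancelled): clean up via the deferred Delete above.
			return
		case <-ticker.C:
			if attempt(ctx) {
				return
			}
		}
	}
}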
// Test concurrent retry scenarios
func TestConcurrentRetries(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Setup test config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
t.Run("multiple blocks can have concurrent retries", func(t *testing.T) {
// Setup server that returns no blobs
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// Create multiple test blocks
testBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 3)
roots := make([][32]byte, 3)
for i := 0; i < 3; i++ {
kzgCommitments := createRandomKzgCommitments(t, 1)
sb := util.NewBeaconBlockFulu()
sb.Block.Body.BlobKzgCommitments = kzgCommitments
signedB, err := blocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
testBlocks[i] = signedB
roots[i] = [32]byte{byte(i), byte(i), byte(i)}
}
ctx := context.Background()
// Start retries for all blocks
for i := 0; i < 3; i++ {
_, err := client.ReconstructDataColumnSidecars(ctx, testBlocks[i], roots[i])
require.NoError(t, err)
}
// Wait a bit for the goroutines to start
time.Sleep(10 * time.Millisecond)
// All should have active retries
for i := 0; i < 3; i++ {
require.Equal(t, true, client.hasActiveRetry(roots[i]))
}
// Clean up
for i := 0; i < 3; i++ {
if cancel, ok := client.activeRetries.Load(roots[i]); ok {
cancel.(context.CancelFunc)()
}
}
})
}
// Test end-to-end retry behavior with data availability changes
func TestRetryBehaviorWithDataAvailability(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Setup test config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
// Create test block
kzgCommitments := createRandomKzgCommitments(t, 1)
sb := util.NewBeaconBlockFulu()
sb.Block.Body.BlobKzgCommitments = kzgCommitments
signedB, err := blocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
r := [32]byte{1, 2, 3}
t.Run("retry stops when data becomes available", func(t *testing.T) {
// Setup server that returns no blobs initially
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// Start the initial reconstruction which should trigger retry
ctx := context.Background()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
// Wait a bit for the goroutine to start
time.Sleep(10 * time.Millisecond)
// Verify retry started
require.Equal(t, true, client.hasActiveRetry(r))
// Wait for retry timeout (the retry will continue since there's no way to stop it now)
time.Sleep(300 * time.Millisecond)
// Retry should still be active since there's no availability check to stop it
require.Equal(t, true, client.hasActiveRetry(r))
})
t.Run("retry continues when data is not available", func(t *testing.T) {
// Setup server that returns no blobs
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// Start the initial reconstruction which should trigger retry
ctx := context.Background()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
// Wait a bit for the goroutine to start
time.Sleep(10 * time.Millisecond)
// Verify retry started
require.Equal(t, true, client.hasActiveRetry(r))
// Wait a bit - retry should still be active
time.Sleep(100 * time.Millisecond)
require.Equal(t, true, client.hasActiveRetry(r))
// Clean up
if cancel, ok := client.activeRetries.Load(r); ok {
cancel.(context.CancelFunc)()
}
// Wait for cleanup
time.Sleep(50 * time.Millisecond)
require.Equal(t, false, client.hasActiveRetry(r))
})
}
// TestConcurrentReconstructDataColumnSidecars tests that concurrent calls to ReconstructDataColumnSidecars
// don't result in multiple getBlobsV2 calls for the same block root
func TestConcurrentReconstructDataColumnSidecars(t *testing.T) {
t.Run("concurrent calls share result", func(t *testing.T) {
// Setup server that tracks call count
callCount := int32(0)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
atomic.AddInt32(&callCount, 1)
w.Header().Set("Content-Type", "application/json")
// Simulate some processing time
time.Sleep(10 * time.Millisecond)
if strings.Contains(r.URL.RequestURI(), GetBlobsV2) {
// Return empty result - simulating EL doesn't have the data yet
resp := []interface{}{nil}
respJSON, _ := json.Marshal(map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
})
_, _ = w.Write(respJSON)
return
}
}))
defer srv.Close()
// Setup client
client := &Service{}
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// Create test block with KZG commitments
slot := primitives.Slot(100)
block := util.NewBeaconBlockDeneb()
block.Block.Slot = slot
commitment := [48]byte{1, 2, 3}
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
blockRoot, err := signedBlock.Block().HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
// Start multiple concurrent calls
numCalls := 5
var wg sync.WaitGroup
results := make([][]blocks.VerifiedRODataColumn, numCalls)
errors := make([]error, numCalls)
for i := 0; i < numCalls; i++ {
wg.Add(1)
go func(index int) {
defer wg.Done()
result, err := client.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
results[index] = result
errors[index] = err
}(i)
}
// Wait for all calls to complete
wg.Wait()
// Verify that GetBlobsV2 was called only once, not numCalls times
finalCallCount := atomic.LoadInt32(&callCount)
require.Equal(t, int32(1), finalCallCount, "Expected GetBlobsV2 to be called only once, but was called %d times", finalCallCount)
// Verify all calls got the same result length
for i := 1; i < numCalls; i++ {
require.Equal(t, len(results[0]), len(results[i]), "All concurrent calls should return same result length")
}
})
}

View File

@@ -71,4 +71,19 @@ var (
Name: "execution_payload_bodies_count",
Help: "The number of requested payload bodies is too large",
})
getBlobsRetryAttempts = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "getblobs_retry_attempts_total",
Help: "Total number of getBlobsV2 retry attempts",
},
[]string{"result"},
)
getBlobsRetryDuration = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "getblobs_retry_duration_seconds",
Help: "Duration of getBlobsV2 retry cycles",
Buckets: []float64{0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0},
},
[]string{"result"},
)
)
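// Illustrative usage sketch (an assumed call pattern, not part of this change): how a
// retry cycle might record the metrics defined above. The observeRetryCycle helper is
// hypothetical and assumes the time package is imported in this file.
func observeRetryCycle(start time.Time, succeeded bool) {
	result := "failure"
	if succeeded {
		result = "success"
	}
	getBlobsRetryAttempts.WithLabelValues(result).Inc()
	getBlobsRetryDuration.WithLabelValues(result).Observe(time.Since(start).Seconds())
}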

View File

@@ -13,6 +13,8 @@ import (
"sync"
"time"
"golang.org/x/sync/singleflight"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
@@ -162,6 +164,8 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
blobVerifier verification.NewBlobVerifier
capabilityCache *capabilityCache
activeRetries sync.Map // map[blockRoot]context.CancelFunc for tracking active retries
reconstructSingleflight singleflight.Group
}
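// Illustrative sketch only (a hypothetical helper, not the actual call site): how
// reconstructSingleflight can collapse concurrent reconstruction work for the same block
// root, matching the behavior asserted in TestConcurrentReconstructDataColumnSidecars.
// Callers would type-assert the shared result (e.g. to []blocks.VerifiedRODataColumn).
func (s *Service) reconstructOnce(blockRoot [32]byte, reconstruct func() (interface{}, error)) (interface{}, error) {
	// Do runs reconstruct at most once per in-flight key; concurrent callers share the result.
	v, err, _ := s.reconstructSingleflight.Do(string(blockRoot[:]), reconstruct)
	return v, err
}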
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.

View File

@@ -845,6 +845,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
}, opts...)
return b.services.RegisterService(is)
}
@@ -1124,4 +1125,4 @@ func hasNetworkFlag(cliCtx *cli.Context) bool {
}
}
return false
}
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
)
var _ Scorer = (*BadResponsesScorer)(nil)
@@ -132,13 +131,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.BadResponses >= s.config.Threshold {
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
}
// if peerData, ok := s.store.PeerData(pid); ok {
// TODO: Remove this when out of devnet
// if peerData.BadResponses >= s.config.Threshold {
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
// }
return nil
}
// return nil
// }
return nil
}

View File

@@ -1,7 +1,6 @@
package scorers_test
import (
"sort"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
@@ -13,39 +12,41 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
)
func TestScorers_BadResponses_Score(t *testing.T) {
const pid = "peer1"
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_Score(t *testing.T) {
// const pid = "peer1"
ctx := t.Context()
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 4,
},
},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 4,
// },
// },
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}
// scorer.Increment(pid)
// assert.NotNil(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -100.0, scorer.Score(pid))
// }
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
ctx := t.Context()
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
}
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
ctx := t.Context()
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.NoError(t, scorer.IsBadPeer(pid))
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pid := peer.ID("peer1")
// assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.NoError(t, scorer.IsBadPeer(pid))
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// assert.NoError(t, scorer.IsBadPeer(pid))
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pid)
// if i == scorers.DefaultBadResponsesThreshold-1 {
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// } else {
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// }
// }
// }
func TestScorers_BadResponses_BadPeers(t *testing.T) {
ctx := t.Context()
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
for i := 0; i < len(pids); i++ {
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
}
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pids[1])
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {
return badPeers[i] < badPeers[j]
})
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
}
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
// for i := 0; i < len(pids); i++ {
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
// }
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pids[1])
// scorer.Increment(pids[2])
// scorer.Increment(pids[4])
// }
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
// want := []peer.ID{pids[1], pids[2], pids[4]}
// badPeers := scorer.BadPeers()
// sort.Slice(badPeers, func(i, j int) bool {
// return badPeers[i] < badPeers[j]
// })
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
// }

View File

@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")

View File

@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
})
}
func TestScorers_Service_loop(t *testing.T) {
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()
// TODO: Uncomment when out of devnet
// func TestScorers_Service_loop(t *testing.T) {
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// defer cancel()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 5,
DecayInterval: 50 * time.Millisecond,
},
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
DecayInterval: 25 * time.Millisecond,
Decay: 64,
},
},
})
s1 := peerStatuses.Scorers().BadResponsesScorer()
s2 := peerStatuses.Scorers().BlockProviderScorer()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 5,
// DecayInterval: 50 * time.Millisecond,
// },
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
// DecayInterval: 25 * time.Millisecond,
// Decay: 64,
// },
// },
// })
// s1 := peerStatuses.Scorers().BadResponsesScorer()
// s2 := peerStatuses.Scorers().BlockProviderScorer()
pid1 := peer.ID("peer1")
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
// pid1 := peer.ID("peer1")
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
// for i := 0; i < s1.Params().Threshold+5; i++ {
// s1.Increment(pid1)
// }
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
// s2.IncrementProcessedBlocks("peer1", 221)
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
done := make(chan struct{}, 1)
go func() {
defer func() {
done <- struct{}{}
}()
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
t.Error("Timed out")
return
}
}
}()
// done := make(chan struct{}, 1)
// go func() {
// defer func() {
// done <- struct{}{}
// }()
// ticker := time.NewTicker(50 * time.Millisecond)
// defer ticker.Stop()
// for {
// select {
// case <-ticker.C:
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
// return
// }
// case <-ctx.Done():
// t.Error("Timed out")
// return
// }
// }
// }()
<-done
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
// <-done
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
// }
func TestScorers_Service_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
// TODO: Uncomment when out of devnet
// func TestScorers_Service_IsBadPeer(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// }
func TestScorers_Service_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
// TODO: Uncomment when out of devnet
// func TestScorers_Service_BadPeers(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
// for _, pid := range []peer.ID{"peer1", "peer3"} {
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// }
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
// }

View File

@@ -62,7 +62,9 @@ const (
const (
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5
// TODO: Revert this when out of devnet.
// CollocationLimit = 5
CollocationLimit = 9999
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -780,6 +782,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))

View File

@@ -2,7 +2,6 @@ package peers_test
import (
"crypto/rand"
"strconv"
"testing"
"time"
@@ -328,55 +327,56 @@ func TestPeerWithNilChainState(t *testing.T) {
require.Equal(t, resChainState, nothing)
}
func TestPeerBadResponses(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPeerBadResponses(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
{
_, err := id.MarshalBinary()
require.NoError(t, err)
}
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
// require.NoError(t, err)
// {
// _, err := id.MarshalBinary()
// require.NoError(t, err)
// }
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
direction := network.DirInbound
p.Add(new(enr.Record), id, address, direction)
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
// require.NoError(t, err, "Failed to create address")
// direction := network.DirInbound
// p.Add(new(enr.Record), id, address, direction)
scorer := p.Scorers().BadResponsesScorer()
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer := p.Scorers().BadResponsesScorer()
// resBadResponses, err := scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// }
func TestAddMetaData(t *testing.T) {
maxBadResponses := 2
@@ -495,100 +495,102 @@ func TestPeerValidTime(t *testing.T) {
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}
func TestPrune(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPrune(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
}
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// if i%7 == 0 {
// // Peer added as disconnected.
// _ = addPeer(t, p, peers.PeerDisconnected)
// }
// // Peer added to peer handler.
// _ = addPeer(t, p, peers.PeerConnected)
// }
disPeers := p.Disconnected()
firstPID := disPeers[0]
secondPID := disPeers[1]
thirdPID := disPeers[2]
// disPeers := p.Disconnected()
// firstPID := disPeers[0]
// secondPID := disPeers[1]
// thirdPID := disPeers[2]
scorer := p.Scorers().BadResponsesScorer()
// scorer := p.Scorers().BadResponsesScorer()
// Make first peer a bad peer
scorer.Increment(firstPID)
scorer.Increment(firstPID)
// // Make first peer a bad peer
// scorer.Increment(firstPID)
// scorer.Increment(firstPID)
// Add bad response for p2.
scorer.Increment(secondPID)
// // Add bad response for p2.
// scorer.Increment(secondPID)
// Prune peers
p.Prune()
// // Prune peers
// p.Prune()
// Bad peer is expected to still be kept in handler.
badRes, err := scorer.Count(firstPID)
assert.NoError(t, err, "error is supposed to be nil")
assert.Equal(t, 2, badRes, "Did not get expected amount")
// // Bad peer is expected to still be kept in handler.
// badRes, err := scorer.Count(firstPID)
// assert.NoError(t, err, "error is supposed to be nil")
// assert.Equal(t, 2, badRes, "Did not get expected amount")
// Not so good peer is pruned away so that we can reduce the
// total size of the handler.
_, err = scorer.Count(secondPID)
assert.ErrorContains(t, "peer unknown", err)
// // Not so good peer is pruned away so that we can reduce the
// // total size of the handler.
// _, err = scorer.Count(secondPID)
// assert.ErrorContains(t, "peer unknown", err)
// Last peer has been removed.
_, err = scorer.Count(thirdPID)
assert.ErrorContains(t, "peer unknown", err)
}
// // Last peer has been removed.
// _, err = scorer.Count(thirdPID)
// assert.ErrorContains(t, "peer unknown", err)
// }
func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPeerIPTracker(t *testing.T) {
// resetCfg := features.InitWithReset(&features.Flags{
// EnablePeerScorer: false,
// })
// defer resetCfg()
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}
// badIP := "211.227.218.116"
// var badPeers []peer.ID
// for i := 0; i < peers.CollocationLimit+10; i++ {
// port := strconv.Itoa(3000 + i)
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
// if err != nil {
// t.Fatal(err)
// }
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
// }
// for _, pr := range badPeers {
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
// }
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
// // Add in bad peers, so that our records are trimmed out
// // from the peer store.
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// // Peer added to peer handler.
// pid := addPeer(t, p, peers.PeerDisconnected)
// p.Scorers().BadResponsesScorer().Increment(pid)
// }
// p.Prune()
for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
// for _, pr := range badPeers {
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
// }
// }
func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{

View File

@@ -169,7 +169,7 @@ var (
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
}
// Maps all registered protocol prefixes.

View File

@@ -11,8 +11,6 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -391,48 +389,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
return params.ForkDigest(clock.CurrentEpoch())
}
func TestService_connectWithPeer(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
peers *peers.Status
info peer.AddrInfo
wantErr string
}{
{
name: "bad peer",
peers: func() *peers.Status {
ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
for i := 0; i < 10; i++ {
ps.Scorers().BadResponsesScorer().Increment("bad")
}
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "bad peer",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h, _, _ := createHost(t, 34567)
defer func() {
if err := h.Close(); err != nil {
t.Fatal(err)
}
}()
ctx := t.Context()
s := &Service{
host: h,
peers: tt.peers,
}
err := s.connectWithPeer(ctx, tt.info)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
})
}
}
// TODO: Uncomment when out of devnet.
// func TestService_connectWithPeer(t *testing.T) {
// params.SetupTestConfigCleanup(t)
// tests := []struct {
// name string
// peers *peers.Status
// info peer.AddrInfo
// wantErr string
// }{
// {
// name: "bad peer",
// peers: func() *peers.Status {
// ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
// ScorerParams: &scorers.Config{},
// })
// for i := 0; i < 10; i++ {
// ps.Scorers().BadResponsesScorer().Increment("bad")
// }
// return ps
// }(),
// info: peer.AddrInfo{ID: "bad"},
// wantErr: "bad peer",
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// h, _, _ := createHost(t, 34567)
// defer func() {
// if err := h.Close(); err != nil {
// t.Fatal(err)
// }
// }()
// ctx := t.Context()
// s := &Service{
// host: h,
// peers: tt.peers,
// }
// err := s.connectWithPeer(ctx, tt.info)
// if len(tt.wantErr) > 0 {
// require.ErrorContains(t, tt.wantErr, err)
// } else {
// require.NoError(t, err)
// }
// })
// }
// }

View File

@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
}
// Len is the number of elements in the collection.
func (s BlobSidecarsByRootReq) Len() int {
return len(s)
func (s *BlobSidecarsByRootReq) Len() int {
return len(*s)
}
// ====================================

View File

@@ -1,3 +1,5 @@
# gazelle:ignore
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
@@ -37,6 +39,7 @@ go_library(
"//api/client/builder:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -47,6 +50,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -181,7 +185,6 @@ common_deps = [
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]
# gazelle:ignore
go_test(
name = "go_default_test",
timeout = "moderate",

View File

@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
require.NoError(t, err)
r1, err := eb.Block.HashTreeRoot()
require.NoError(t, err)
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
bundle := &enginev1.BlobsBundleV2{
KzgCommitments: [][]byte{{1, 2, 3}},
Proofs: [][]byte{{4, 5, 6}},
Blobs: [][]byte{{7, 8, 9}},
}
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
require.NoError(t, err)
r2, err := result.GetFulu().Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, r1, r2)
require.Equal(t, result.IsBlinded, false)
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
})
// Test for Electra version

View File

@@ -544,4 +544,4 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
default:
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
}
}
}

View File

@@ -69,6 +69,7 @@ type Server struct {
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

View File

@@ -89,6 +89,7 @@ type Config struct {
AttestationReceiver blockchain.AttestationReceiver
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -120,6 +121,7 @@ type Config struct {
Router *http.ServeMux
ClockWaiter startup.ClockWaiter
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
LCStore *lightClient.Store
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
DataColumnStorage: s.cfg.DataColumnStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
coreService := &core.Service{
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -8,6 +8,7 @@ go_library(
"broadcast_bls_changes.go",
"context.go",
"custody.go",
"data_column_sidecars.go",
"data_columns_reconstruct.go",
"deadlines.go",
"decode_pubsub.go",
@@ -136,6 +137,7 @@ go_library(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
@@ -159,7 +161,7 @@ go_library(
go_test(
name = "go_default_test",
size = "small",
size = "medium",
srcs = [
"batch_verifier_test.go",
"blobs_test.go",
@@ -167,7 +169,9 @@ go_test(
"broadcast_bls_changes_test.go",
"context_test.go",
"custody_test.go",
"data_column_sidecars_test.go",
"data_columns_reconstruct_test.go",
"data_columns_test.go",
"decode_pubsub_test.go",
"error_test.go",
"fork_watcher_test.go",
@@ -192,6 +196,7 @@ go_test(
"slot_aware_cache_test.go",
"subscriber_beacon_aggregate_proof_test.go",
"subscriber_beacon_blocks_test.go",
"subscriber_data_column_sidecar_trigger_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
@@ -261,6 +266,7 @@ go_test(
"//container/leaky-bucket:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/ecdsa:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
@@ -274,13 +280,17 @@ go_test(
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",

View File

@@ -91,9 +91,7 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
return err
}
sc := blocks.NewSidecarFromBlobSidecar(rb)
if err := bs.store.Persist(bs.current, sc); err != nil {
if err := bs.store.Persist(bs.current, rb); err != nil {
return err
}

View File

@@ -32,7 +32,7 @@ func (w *p2pWorker) run(ctx context.Context) {
case b := <-w.todo:
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
if b.state == batchBlobSync {
w.done <- w.handleBlobs(ctx, b)
w.done <- w.handleSidecars(ctx, b)
} else {
w.done <- w.handleBlocks(ctx, b)
}
@@ -80,7 +80,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
return b.withResults(vb, bs)
}
func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
func (w *p2pWorker) handleSidecars(ctx context.Context, b batch) batch {
b.blobPid = b.busy
start := time.Now()
// we don't need to use the response for anything other than metrics, because blobResponseValidation

View File

@@ -175,7 +175,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
params.OverrideBeaconConfig(cfg)
}
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
chain, clock := defaultMockChain(t)
chain, clock := defaultMockChain(t, 0)
if c.chain == nil {
c.chain = chain
}
@@ -270,7 +270,7 @@ func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
}
}
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := params.Fork(de)
require.NoError(t, err)

View File

@@ -78,9 +78,10 @@ func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream)
if !more {
return blockBatch{}, false
}
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
}
// TODO: Uncomment when out of devnet.
// if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
// return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
// }
// Wait for the ticker before doing anything expensive, unless this is the first batch.
if bb.ticker != nil && bb.current != nil {

View File

@@ -0,0 +1,869 @@
package sync
import (
"bytes"
"context"
"slices"
"sync"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// DataColumnSidecarsParams stores the common parameters needed to
// fetch data column sidecars from peers.
type DataColumnSidecarsParams struct {
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check to conformity of incoming data column sidecars
}
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
// blocks and requested data column indices. It employs a multi-step strategy:
//
// 1. Direct retrieval: If all requested columns are available in storage, they are
// retrieved directly without reconstruction.
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
// stored columns exist (at least the minimum required for reconstruction), the function
// reconstructs all columns and extracts the requested indices.
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
// from connected peers that are expected to custody the required data.
//
// The function returns a map of block roots to their corresponding verified read-only data
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
func FetchDataColumnSidecars(
params DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
indicesMap map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
if len(roBlocks) == 0 || len(indicesMap) == 0 {
return nil, nil
}
indices := sortedSliceFromMap(indicesMap)
slotsWithCommitments := make(map[primitives.Slot]bool)
indicesByRootToQuery := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for _, roBlock := range roBlocks {
// Filter out blocks without commitments.
block := roBlock.Block()
commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
}
if len(commitments) == 0 {
continue
}
slotsWithCommitments[block.Slot()] = true
root := roBlock.Root()
// Step 1: Get the requested sidecars for this root if available in storage
requestedColumns, err := tryGetDirectColumns(params.Storage, root, indices)
if err != nil {
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
}
if requestedColumns != nil {
result[root] = requestedColumns
continue
}
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
if err != nil {
return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
}
if requestedColumns != nil {
result[root] = requestedColumns
continue
}
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
// and those that are already stored.
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
if len(indicesToQueryMap) > 0 {
indicesByRootToQuery[root] = indicesToQueryMap
}
if len(indicesStoredMap) > 0 {
indicesByRootStored[root] = indicesStoredMap
}
}
// Early return if no sidecars need to be queried from peers.
if len(indicesByRootToQuery) == 0 {
return result, nil
}
// Step 3b: Request missing sidecars from peers.
start, count := time.Now(), computeTotalCount(indicesByRootToQuery)
fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, indicesByRootToQuery)
if err != nil {
return nil, errors.Wrap(err, "request from peers")
}
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
for root, verifiedSidecars := range fromPeersResult {
result[root] = append(result[root], verifiedSidecars...)
}
// Step 3c: Load the stored sidecars.
for root, indicesStored := range indicesByRootStored {
requestedColumns, err := tryGetDirectColumns(params.Storage, root, sortedSliceFromMap(indicesStored))
if err != nil {
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
}
result[root] = append(result[root], requestedColumns...)
}
return result, nil
}
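// Illustrative caller sketch (hypothetical wiring, not taken from the actual call sites):
// it walks the three-step strategy documented above end to end. The exampleFetch name and
// the hard-coded column indices are assumptions for the example only.
func exampleFetch(p DataColumnSidecarsParams, roBlocks []blocks.ROBlock) error {
	// In practice the requested indices come from the node's custody assignment.
	indices := map[uint64]bool{0: true, 1: true}
	columnsByRoot, err := FetchDataColumnSidecars(p, roBlocks, indices)
	if err != nil {
		return errors.Wrap(err, "fetch data column sidecars")
	}
	log.WithField("blockCount", len(columnsByRoot)).Debug("Fetched data column sidecars for all requested roots")
	return nil
}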
// tryGetDirectColumns attempts to retrieve all requested columns directly from storage
// if they are all available. Returns the columns if successful, and nil if at least one
// requested sidecar is not available in the storage.
func tryGetDirectColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
// Check if all requested indices are present in cache
storedIndices := storage.Summary(blockRoot).Stored()
allRequestedPresent := true
for _, requestedIndex := range indices {
if !storedIndices[requestedIndex] {
allRequestedPresent = false
break
}
}
if !allRequestedPresent {
return nil, nil
}
// All requested data is present, retrieve directly from DB
requestedColumns, err := storage.Get(blockRoot, indices)
if err != nil {
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
}
return requestedColumns, nil
}
// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
// if sufficient columns are available. Returns the columns if successful, nil and nil if insufficient columns,
// or nil and error if an error occurs.
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
// Check if we have enough columns for reconstruction
summary := storage.Summary(blockRoot)
if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
return nil, nil
}
// Retrieve all stored columns for reconstruction
allStoredColumns, err := storage.Get(blockRoot, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
}
// Attempt reconstruction
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
if err != nil {
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
}
// Health check: ensure we have the expected number of columns
numberOfColumns := params.BeaconConfig().NumberOfColumns
if uint64(len(reconstructedColumns)) != numberOfColumns {
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
}
// Extract only the requested indices from reconstructed data using direct indexing
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
for _, requestedIndex := range indices {
if requestedIndex >= numberOfColumns {
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
}
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
}
return requestedColumns, nil
}
// categorizeIndices separates indices into those that need to be queried from peers
// and those that are already stored.
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
indicesToQuery := make(map[uint64]bool, len(indices))
indicesStored := make(map[uint64]bool, len(indices))
allStoredIndices := storage.Summary(blockRoot).Stored()
for _, index := range indices {
if allStoredIndices[index] {
indicesStored[index] = true
continue
}
indicesToQuery[index] = true
}
return indicesToQuery, indicesStored
}
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
// It explores the connected peers to find those that are expected to custody the requested columns
// and returns only once every requested column has either been retrieved or been requested
// from every eligible peer.
// Returns a map of block roots to their verified read-only data column sidecars.
// Returns an error if at least one requested column could not be retrieved.
// WARNING: This function alters `missingIndicesByRoot`. The caller should NOT use it after running this function.
func tryRequestingColumnsFromPeers(
p DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
slotsWithCommitments map[primitives.Slot]bool,
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
// Create a new random source for peer selection.
randomSource := rand.NewGenerator()
// Compute slots by block root.
slotByRoot := computeSlotByBlockRoot(roBlocks)
// Determine all sidecars each peer is expected to custody.
connectedPeersSlice := p.P2P.Peers().Connected()
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
for _, peer := range connectedPeersSlice {
connectedPeers[peer] = true
}
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
if err != nil {
return nil, errors.Wrap(err, "explore peers")
}
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
// Select peers to query the missing sidecars from.
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
if err != nil {
return nil, errors.Wrap(err, "select peers")
}
// Remove selected peers from the maps.
for peer := range indicesByRootByPeerToQuery {
delete(connectedPeers, peer)
}
// Fetch the sidecars from the chosen peers.
roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(p, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
// Verify the received data column sidecars.
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(p.P2P, p.NewVerifier, roDataColumnsByPeer)
if err != nil {
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
}
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
newMissingIndicesByRoot, localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
missingIndicesByRoot = newMissingIndicesByRoot
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
}
// Compute indices by root by peers with the updated missing indices and connected peers.
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
if err != nil {
return nil, errors.Wrap(err, "explore peers")
}
}
if len(missingIndicesByRoot) > 0 {
return nil, errors.New("not all requested data column sidecars were retrieved from peers")
}
return verifiedColumnsByRoot, nil
}
// selectPeers selects peers to query the sidecars from.
// It starts by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth
// and assigns to it all the sidecars it can provide. It then randomly selects another peer,
// and so on, until all sidecars in `origIndicesByRootByPeer` are covered.
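// As an illustrative example (not taken from a real run): if peerA offers {rootX: {1, 2}} and
// peerB offers {rootX: {2, 3}}, and peerA happens to be drawn first, then peerA is assigned
// {rootX: {1, 2}} and peerB is left with only {rootX: {3}} for a later draw.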
func selectPeers(
p DataColumnSidecarsParams,
randomSource *rand.Rand,
count int,
origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
const randomPeerTimeout = 30 * time.Second
// Select peers to query the missing sidecars from.
indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
internalIndicesByRootByPeer := copyIndicesByRootByPeer(indicesByRootByPeer)
indicesByRootByPeerToQuery := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
for len(internalIndicesByRootByPeer) > 0 {
// Randomly select a peer with enough bandwidth.
peer, err := func() (goPeer.ID, error) {
ctx, cancel := context.WithTimeout(p.Ctx, randomPeerTimeout)
defer cancel()
peer, err := randomPeer(ctx, randomSource, p.RateLimiter, count, internalIndicesByRootByPeer)
if err != nil {
return "", errors.Wrap(err, "select random peer")
}
return peer, err
}()
if err != nil {
return nil, err
}
// Query all the sidecars that peer can offer us.
newIndicesByRoot, ok := internalIndicesByRootByPeer[peer]
if !ok {
return nil, errors.Errorf("peer %s not found in internal indices by root by peer map", peer)
}
indicesByRootByPeerToQuery[peer] = newIndicesByRoot
// Remove this peer from the maps to avoid re-selection.
delete(indicesByRootByPeer, peer)
delete(internalIndicesByRootByPeer, peer)
// Delete the corresponding sidecars from other peers in the internal map
// to avoid re-selection during this iteration.
for peer, indicesByRoot := range internalIndicesByRootByPeer {
for root, indices := range indicesByRoot {
newIndices := newIndicesByRoot[root]
for index := range newIndices {
delete(indices, index)
}
if len(indices) == 0 {
delete(indicesByRoot, root)
}
}
if len(indicesByRoot) == 0 {
delete(internalIndicesByRootByPeer, peer)
}
}
}
return indicesByRootByPeerToQuery, nil
}
// updateResults updates the missing indices and verified sidecars maps based on the newly verified sidecars.
func updateResults(
verifiedSidecars []blocks.VerifiedRODataColumn,
origMissingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte]map[uint64]bool, map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn) {
// Copy the original map to avoid modifying it directly.
missingIndicesByRoot := copyIndicesByRoot(origMissingIndicesByRoot)
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for _, verifiedSidecar := range verifiedSidecars {
blockRoot := verifiedSidecar.BlockRoot()
index := verifiedSidecar.Index
// Add to the result map grouped by block root
verifiedSidecarsByRoot[blockRoot] = append(verifiedSidecarsByRoot[blockRoot], verifiedSidecar)
if indices, ok := missingIndicesByRoot[blockRoot]; ok {
delete(indices, index)
if len(indices) == 0 {
delete(missingIndicesByRoot, blockRoot)
}
}
}
return missingIndicesByRoot, verifiedSidecarsByRoot
}
// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
func fetchDataColumnSidecarsFromPeers(
params DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) map[goPeer.ID][]blocks.RODataColumn {
var (
wg sync.WaitGroup
mut sync.Mutex
)
roDataColumnsByPeer := make(map[goPeer.ID][]blocks.RODataColumn)
wg.Add(len(indicesByRootByPeer))
for peerID, indicesByRoot := range indicesByRootByPeer {
go func(peerID goPeer.ID, indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) {
defer wg.Done()
requestedCount := 0
for _, indices := range indicesByRoot {
requestedCount += len(indices)
}
log := log.WithFields(logrus.Fields{
"peerID": peerID,
"agent": agentString(peerID, params.P2P.Host()),
"blockCount": len(indicesByRoot),
"totalRequestedCount": requestedCount,
})
roDataColumns, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, peerID, indicesByRoot)
if err != nil {
log.WithError(err).Warning("Failed to send data column sidecars request")
return
}
mut.Lock()
defer mut.Unlock()
roDataColumnsByPeer[peerID] = roDataColumns
}(peerID, indicesByRoot)
}
wg.Wait()
return roDataColumnsByPeer
}
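// sendDataColumnSidecarsRequest requests the data column sidecars described by `indicesByRoot`
// from a single peer. It first tries to express the request as one or more by-range requests
// (possible when all blocks need the same columns and their slots are contiguous), and falls
// back to a single by-root request otherwise. It returns the read-only sidecars sent by the peer.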
func sendDataColumnSidecarsRequest(
params DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
peerID goPeer.ID,
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) ([]blocks.RODataColumn, error) {
const batchSize = 32
rootCount := int64(len(indicesByRoot))
requestedSidecarsCount := 0
for _, indices := range indicesByRoot {
requestedSidecarsCount += len(indices)
}
log := log.WithFields(logrus.Fields{
"peerID": peerID,
"agent": agentString(peerID, params.P2P.Host()),
"requestedSidecars": requestedSidecarsCount,
})
// Try to build by-range requests first.
byRangeRequests, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
if err != nil {
return nil, errors.Wrap(err, "craft by range request")
}
// If we have valid by-range requests, send them.
if len(byRangeRequests) > 0 {
start := time.Now()
roDataColumns := make([]blocks.RODataColumn, 0, requestedSidecarsCount)
for _, request := range byRangeRequests {
params.RateLimiter.Add(peerID.String(), rootCount)
localRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peerID, request)
if err != nil {
return nil, errors.Wrapf(err, "send data column sidecars by range request to peer %s", peerID)
}
roDataColumns = append(roDataColumns, localRoDataColumns...)
}
log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requests": len(byRangeRequests),
"type": "byRange",
"duration": time.Since(start),
}).Debug("Received data column sidecars")
return roDataColumns, nil
}
// Build identifiers for the by root request.
byRootRequest := buildByRootRequest(indicesByRoot)
// Send the by root request.
start := time.Now()
params.RateLimiter.Add(peerID.String(), rootCount)
roDataColumns, err := SendDataColumnSidecarsByRootRequest(params, peerID, byRootRequest)
if err != nil {
return nil, errors.Wrapf(err, "send data column sidecars by root request to peer %s", peerID)
}
log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requests": 1,
"type": "byRoot",
"duration": time.Since(start),
}).Debug("Received data column sidecars")
return roDataColumns, nil
}
// buildByRangeRequests constructs by-range requests from the given indices,
// but only if the indices are the same for all blocks and the blocks are contiguous.
// (Missing blocks and blocks without commitments still count as contiguous.)
// If one of these conditions is not met, it returns nil.
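// As an illustrative example (assuming the conditions above hold): requesting columns {1, 2}
// for blocks at slots 1 through 4 with batchSize = 2 yields two requests,
// {StartSlot: 1, Count: 2, Columns: {1, 2}} and {StartSlot: 3, Count: 2, Columns: {1, 2}}.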
func buildByRangeRequests(
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
batchSize uint64,
) ([]*ethpb.DataColumnSidecarsByRangeRequest, error) {
if len(indicesByRoot) == 0 {
return nil, nil
}
var reference map[uint64]bool
slots := make([]primitives.Slot, 0, len(slotByRoot))
for root, indices := range indicesByRoot {
if reference == nil {
reference = indices
}
if !compareIndices(reference, indices) {
return nil, nil
}
slot, ok := slotByRoot[root]
if !ok {
return nil, errors.Errorf("slot not found for block root %#x", root)
}
slots = append(slots, slot)
}
slices.Sort(slots)
for i := 1; i < len(slots); i++ {
previous, current := slots[i-1], slots[i]
if current == previous+1 {
continue
}
for j := previous + 1; j < current; j++ {
if slotsWithCommitments[j] {
return nil, nil
}
}
}
columns := sortedSliceFromMap(reference)
startSlot, endSlot := slots[0], slots[len(slots)-1]
totalCount := uint64(endSlot - startSlot + 1)
requests := make([]*ethpb.DataColumnSidecarsByRangeRequest, 0, totalCount/batchSize)
for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
end := min(start+primitives.Slot(batchSize)-1, endSlot)
request := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: start,
Count: uint64(end - start + 1),
Columns: columns,
}
requests = append(requests, request)
}
return requests, nil
}
// buildByRootRequest constructs a by root request from the given indices.
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
for root, indices := range indicesByRoot {
identifier := &eth.DataColumnsByRootIdentifier{
BlockRoot: root[:],
Columns: sortedSliceFromMap(indices),
}
identifiers = append(identifiers, identifier)
}
// Sort identifiers to have a deterministic output.
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
return cmp
}
return slices.Compare(left.Columns, right.Columns)
})
return identifiers
}
// verifyDataColumnSidecarsByPeer verifies the received data column sidecars.
// If at least one sidecar from a peer is invalid, the peer is downscored and
// all its sidecars are rejected. (Sidecars from other peers are still accepted.)
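// As an illustrative example: if peerA returns five valid sidecars and peerB returns four
// sidecars of which one is invalid, all four of peerB's sidecars are dropped and peerB is
// downscored, while peerA's five sidecars are still returned.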
func verifyDataColumnSidecarsByPeer(
p2p prysmP2P.P2P,
newVerifier verification.NewDataColumnsVerifier,
roDataColumnsByPeer map[goPeer.ID][]blocks.RODataColumn,
) ([]blocks.VerifiedRODataColumn, error) {
// First optimistically verify all received data columns in a single batch.
count := 0
for _, columns := range roDataColumnsByPeer {
count += len(columns)
}
roDataColumnSidecars := make([]blocks.RODataColumn, 0, count)
for _, columns := range roDataColumnsByPeer {
roDataColumnSidecars = append(roDataColumnSidecars, columns...)
}
verifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, roDataColumnSidecars)
if err == nil {
// This is the happy path where all sidecars are verified.
return verifiedRoDataColumnSidecars, nil
}
// An error occurred during verification, which means that at least one sidecar is invalid.
// Reverify peer by peer to identify faulty peer(s), reject all its sidecars, and downscore it.
verifiedRoDataColumnSidecars = make([]blocks.VerifiedRODataColumn, 0, count)
for peer, columns := range roDataColumnsByPeer {
peerVerifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, columns)
if err != nil {
// This peer has invalid sidecars.
log := log.WithError(err).WithField("peerID", peer)
newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peer)
log.Warning("Peer returned invalid data column sidecars")
log.WithFields(logrus.Fields{"reason": "invalidDataColumnSidecars", "newScore": newScore}).Debug("Downscore peer")
}
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, peerVerifiedRoDataColumnSidecars...)
}
return verifiedRoDataColumnSidecars, nil
}
// verifyByRootDataColumnSidecars verifies the provided read-only data columns against the
// requirements for data column sidecars received via the by root request.
func verifyByRootDataColumnSidecars(newVerifier verification.NewDataColumnsVerifier, roDataColumns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
verifier := newVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return nil, errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return nil, errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return nil, errors.Wrap(err, "sidecar KZG proof verified")
}
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return nil, errors.Wrap(err, "verified RO data columns - should never happen")
}
return verifiedRoDataColumns, nil
}
// computeIndicesByRootByPeer returns a peer->root->indices map restricted to the roots and
// indices given in `indicesByBlockRoot`. A peer is only selected for a given root if its
// reported head slot is at least the block's slot.
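// As an illustrative example: a peer that custodies column 3 and reports head slot 10 is a
// candidate for a root at slot 8 requesting column 3, but not for a root at slot 12.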
func computeIndicesByRootByPeer(
p2p prysmP2P.P2P,
slotByBlockRoot map[[fieldparams.RootLength]byte]primitives.Slot,
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
peers map[goPeer.ID]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
// First, compute custody columns for all peers
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
for peer := range peers {
// Computes the custody columns for each peer
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
}
custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
}
for column := range dasInfo.CustodyColumns {
if _, exists := peersByIndex[column]; !exists {
peersByIndex[column] = make(map[goPeer.ID]bool)
}
peersByIndex[column][peer] = true
}
// Compute the head slot for each peer
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
}
if peerChainState == nil {
return nil, errors.Errorf("chain state is nil for peer %s", peer)
}
headSlotByPeer[peer] = peerChainState.HeadSlot
}
// For each block root and its indices, find suitable peers
indicesByRootByPeer := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
for blockRoot, indices := range indicesByBlockRoot {
blockSlot, ok := slotByBlockRoot[blockRoot]
if !ok {
return nil, errors.Errorf("slot not found for block root %#x", blockRoot)
}
for index := range indices {
peers := peersByIndex[index]
for peer := range peers {
peerHeadSlot, ok := headSlotByPeer[peer]
if !ok {
return nil, errors.Errorf("head slot not found for peer %s", peer)
}
if peerHeadSlot < blockSlot {
continue
}
// Build peers->root->indices map
if _, exists := indicesByRootByPeer[peer]; !exists {
indicesByRootByPeer[peer] = make(map[[fieldparams.RootLength]byte]map[uint64]bool)
}
if _, exists := indicesByRootByPeer[peer][blockRoot]; !exists {
indicesByRootByPeer[peer][blockRoot] = make(map[uint64]bool)
}
indicesByRootByPeer[peer][blockRoot][index] = true
}
}
}
return indicesByRootByPeer, nil
}
// randomPeer selects a random peer. If no peer has enough bandwidth, it waits and retries.
// Returns the selected peer ID and any error.
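// For example, with count = 3, only peers whose remaining rate-limiter capacity is at least 3
// are eligible for selection; if none qualify, the function sleeps and retries.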
func randomPeer(
ctx context.Context,
randomSource *rand.Rand,
rateLimiter *leakybucket.Collector,
count int,
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (goPeer.ID, error) {
const waitPeriod = 5 * time.Second
peerCount := len(indicesByRootByPeer)
if peerCount == 0 {
return "", errors.New("no peers available")
}
for ctx.Err() == nil {
nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
for peer := range indicesByRootByPeer {
remaining := rateLimiter.Remaining(peer.String())
if remaining >= int64(count) {
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
}
}
if len(nonRateLimitedPeers) == 0 {
log.WithFields(logrus.Fields{
"peerCount": peerCount,
"delay": waitPeriod,
}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
time.Sleep(waitPeriod)
continue
}
randomIndex := randomSource.Intn(len(nonRateLimitedPeers))
return nonRateLimitedPeers[randomIndex], nil
}
return "", ctx.Err()
}
// copyIndicesByRootByPeer creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRootByPeer(original map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool) map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool {
copied := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
for peer, indicesByRoot := range original {
copied[peer] = copyIndicesByRoot(indicesByRoot)
}
return copied
}
// copyIndicesByRoot creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRoot(original map[[fieldparams.RootLength]byte]map[uint64]bool) map[[fieldparams.RootLength]byte]map[uint64]bool {
copied := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
for root, indexMap := range original {
copied[root] = make(map[uint64]bool, len(indexMap))
for index, value := range indexMap {
copied[root][index] = value
}
}
return copied
}
// compareIndices compares two map[uint64]bool and returns true if they are equal.
func compareIndices(left, right map[uint64]bool) bool {
if len(left) != len(right) {
return false
}
for key, leftValue := range left {
rightValue, exists := right[key]
if !exists || leftValue != rightValue {
return false
}
}
return true
}
// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
result := make([]uint64, 0, len(m))
for k := range m {
result = append(result, k)
}
slices.Sort(result)
return result
}
// computeSlotByBlockRoot maps each block root to its corresponding slot.
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
for _, roBlock := range roBlocks {
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
}
return slotByBlockRoot
}
// computeTotalCount calculates the total count of indices across all roots.
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
totalCount := 0
for _, indices := range input {
totalCount += len(indices)
}
return totalCount
}

@@ -0,0 +1,984 @@
package sync
import (
"context"
"fmt"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
)
func TestFetchDataColumnSidecars(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Slot 1: All needed sidecars are available in storage
// Slot 2: No commitment
// Slot 3: All sidecars are saved except the needed ones
// Slot 4: Some sidecars are in storage, the others have to be retrieved from peers.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
storage := filesystem.NewEphemeralDataColumnStorage(t)
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
const blobCount = 3
indices := map[uint64]bool{31: true, 81: true, 106: true}
// Block 1
block1, _, verifiedSidecars1 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(1))
root1 := block1.Root()
toStore1 := make([]blocks.VerifiedRODataColumn, 0, len(indices))
for index := range indices {
sidecar := verifiedSidecars1[index]
toStore1 = append(toStore1, sidecar)
}
err = storage.Save(toStore1)
require.NoError(t, err)
// Block 2
block2, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(2))
// Block 3
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
root3 := block3.Root()
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
for i := range numberOfColumns {
if !indices[i] {
sidecar := verifiedSidecars3[i]
toStore3 = append(toStore3, sidecar)
}
}
err = storage.Save(toStore3)
require.NoError(t, err)
// Block 4
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
root4 := block4.Root()
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
err = storage.Save(toStore4)
require.NoError(t, err)
privateKeyBytes := [32]byte{1}
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
require.NoError(t, err)
// Peers
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
p2p.Connect(other)
p2p.Peers().SetChainState(other.PeerID(), &ethpb.StatusV2{
HeadSlot: 4,
})
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 4,
Count: 1,
Columns: []uint64{31, 81},
}
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
gs := startup.NewClockSynchronizer()
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
other.SetStreamHandler(protocol, func(stream network.Stream) {
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, actualRequest)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
assert.NoError(t, err)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
assert.NoError(t, err)
err = stream.CloseWrite()
assert.NoError(t, err)
})
params := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p2p,
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
CtxMap: ctxMap,
Storage: storage,
NewVerifier: newDataColumnsVerifier,
}
expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
// no root2 (no commitments in this block)
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
}
blocks := []blocks.ROBlock{block1, block2, block3, block4}
actual, err := FetchDataColumnSidecars(params, blocks, indices)
require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
for root := range expected {
require.Equal(t, len(expected[root]), len(actual[root]))
for i := range expected[root] {
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
}
}
}
func TestCategorizeIndices(t *testing.T) {
storage := filesystem.NewEphemeralDataColumnStorage(t)
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
})
err := storage.Save(verifiedRoSidecars)
require.NoError(t, err)
expectedToQuery := map[uint64]bool{13: true}
expectedStored := map[uint64]bool{12: true, 14: true}
actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})
require.Equal(t, len(expectedToQuery), len(actualToQuery))
require.Equal(t, len(expectedStored), len(actualStored))
for index := range expectedToQuery {
require.Equal(t, true, actualToQuery[index])
}
for index := range expectedStored {
require.Equal(t, true, actualStored[index])
}
}
func TestSelectPeers(t *testing.T) {
const (
count = 3
seed = 46
)
params := DataColumnSidecarsParams{
Ctx: t.Context(),
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
}
randomSource := rand.NewGenerator()
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
"peer1": {
{1}: {12: true, 13: true},
{2}: {13: true, 14: true, 15: true},
{3}: {14: true, 15: true},
},
"peer2": {
{1}: {13: true, 14: true},
{2}: {13: true, 14: true, 15: true},
{3}: {14: true, 16: true},
},
}
expected_1 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
"peer1": {
{1}: {12: true, 13: true},
{2}: {13: true, 14: true, 15: true},
{3}: {14: true, 15: true},
},
"peer2": {
{1}: {14: true},
{3}: {16: true},
},
}
expected_2 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
"peer1": {
{1}: {12: true},
{3}: {15: true},
},
"peer2": {
{1}: {13: true, 14: true},
{2}: {13: true, 14: true, 15: true},
{3}: {14: true, 16: true},
},
}
actual, err := selectPeers(params, randomSource, count, indicesByRootByPeer)
expected := expected_1
if len(actual["peer1"]) == 2 {
expected = expected_2
}
require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
for peerID := range expected {
require.Equal(t, len(expected[peerID]), len(actual[peerID]))
for root := range expected[peerID] {
require.Equal(t, len(expected[peerID][root]), len(actual[peerID][root]))
for indices := range expected[peerID][root] {
require.Equal(t, expected[peerID][root][indices], actual[peerID][root][indices])
}
}
}
}
func TestUpdateResults(t *testing.T) {
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
{Slot: 2, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
{Slot: 2, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
})
missingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
verifiedSidecars[0].BlockRoot(): {12: true, 13: true},
verifiedSidecars[2].BlockRoot(): {13: true, 14: true, 15: true},
}
expectedMissingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
verifiedSidecars[2].BlockRoot(): {15: true},
}
expectedVerifiedSidecarsByRoot := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
verifiedSidecars[0].BlockRoot(): {verifiedSidecars[0], verifiedSidecars[1]},
verifiedSidecars[2].BlockRoot(): {verifiedSidecars[2], verifiedSidecars[3]},
}
actualMissingIndicesByRoot, actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
require.DeepEqual(t, expectedMissingIndicesByRoot, actualMissingIndicesByRoot)
require.DeepEqual(t, expectedVerifiedSidecarsByRoot, actualVerifiedSidecarsByRoot)
}
func TestFetchDataColumnSidecarsFromPeers(t *testing.T) {
const count = 4
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
for range count {
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
}
expectedResponseSidecarPb := &ethpb.DataColumnSidecar{
Index: 2,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
expectedResponseSidecar, err := blocks.NewRODataColumn(expectedResponseSidecarPb)
require.NoError(t, err)
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{3}: 3,
{4}: 4,
{7}: 7,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
3: true,
4: true,
7: true,
}
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 1,
Count: 7,
Columns: []uint64{1, 2},
}
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
p2p.Connect(other)
other.SetStreamHandler(protocol, func(stream network.Stream) {
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, receivedRequest)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponseSidecarPb)
assert.NoError(t, err)
err = stream.CloseWrite()
assert.NoError(t, err)
})
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
other.PeerID(): {
{1}: {1: true, 2: true},
{3}: {1: true, 2: true},
{4}: {1: true, 2: true},
{7}: {1: true, 2: true},
},
}
params := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p2p,
CtxMap: ctxMap,
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
}
expectedResponse := map[peer.ID][]blocks.RODataColumn{
other.PeerID(): {expectedResponseSidecar},
}
actualResponse := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeer)
require.Equal(t, len(expectedResponse), len(actualResponse))
for peerID := range expectedResponse {
require.DeepSSZEqual(t, expectedResponse[peerID], actualResponse[peerID])
}
}
func TestSendDataColumnSidecarsRequest(t *testing.T) {
const count = 4
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
for range count {
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
}
expectedResponsePb := &ethpb.DataColumnSidecar{
Index: 2,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
expectedResponse, err := blocks.NewRODataColumn(expectedResponsePb)
require.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
t.Run("contiguous", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{3}: {1: true, 2: true},
{4}: {1: true, 2: true},
{7}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{3}: 3,
{4}: 4,
{7}: 7,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
3: true,
4: true,
7: true,
}
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 1,
Count: 7,
Columns: []uint64{1, 2},
}
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
p2p.Connect(other)
other.SetStreamHandler(protocol, func(stream network.Stream) {
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, receivedRequest)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
assert.NoError(t, err)
err = stream.CloseWrite()
assert.NoError(t, err)
})
params := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p2p,
CtxMap: ctxMap,
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
}
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
require.NoError(t, err)
require.DeepEqual(t, expectedResponse, actualResponse[0])
})
t.Run("non contiguous", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
expectedResponse.BlockRoot(): {1: true, 2: true},
{4}: {1: true, 2: true},
{7}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
expectedResponse.BlockRoot(): 1,
{4}: 4,
{7}: 7,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
3: true,
4: true,
7: true,
}
roots := [...][fieldparams.RootLength]byte{expectedResponse.BlockRoot(), {4}, {7}}
expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
{
BlockRoot: roots[1][:],
Columns: []uint64{1, 2},
},
{
BlockRoot: roots[2][:],
Columns: []uint64{1, 2},
},
{
BlockRoot: roots[0][:],
Columns: []uint64{1, 2},
},
}
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
p2p.Connect(other)
other.SetStreamHandler(protocol, func(stream network.Stream) {
receivedRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
assert.NoError(t, err)
assert.DeepSSZEqual(t, *expectedRequest, *receivedRequest)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
assert.NoError(t, err)
err = stream.CloseWrite()
assert.NoError(t, err)
})
params := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p2p,
CtxMap: ctxMap,
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
}
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
require.NoError(t, err)
require.DeepEqual(t, expectedResponse, actualResponse[0])
})
}
func TestBuildByRangeRequests(t *testing.T) {
const nullBatchSize = 0
t.Run("empty", func(t *testing.T) {
actual, err := buildByRangeRequests(nil, nil, nil, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("missing Root", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
}
_, err := buildByRangeRequests(nil, nil, indicesByRoot, nullBatchSize)
require.NotNil(t, err)
})
t.Run("indices differ", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{2}: {1: true, 2: true},
{3}: {2: true, 3: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{2}: 2,
{3}: 3,
}
actual, err := buildByRangeRequests(slotByRoot, nil, indicesByRoot, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("slots non contiguous", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{2}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{2}: 3,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
2: true,
3: true,
}
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("nominal", func(t *testing.T) {
const batchSize = 3
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{3}: {1: true, 2: true},
{4}: {1: true, 2: true},
{7}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{3}: 3,
{4}: 4,
{7}: 7,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
3: true,
4: true,
7: true,
}
expected := []*ethpb.DataColumnSidecarsByRangeRequest{
{
StartSlot: 1,
Count: 3,
Columns: []uint64{1, 2},
},
{
StartSlot: 4,
Count: 3,
Columns: []uint64{1, 2},
},
{
StartSlot: 7,
Count: 1,
Columns: []uint64{1, 2},
},
}
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
require.NoError(t, err)
require.DeepEqual(t, expected, actual)
})
}
func TestBuildByRootRequest(t *testing.T) {
root1 := [fieldparams.RootLength]byte{1}
root2 := [fieldparams.RootLength]byte{2}
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
root1: {1: true, 2: true},
root2: {3: true},
}
expected := p2ptypes.DataColumnsByRootIdentifiers{
{
BlockRoot: root1[:],
Columns: []uint64{1, 2},
},
{
BlockRoot: root2[:],
Columns: []uint64{3},
},
}
actual := buildByRootRequest(input)
require.DeepSSZEqual(t, expected, actual)
}
func TestVerifyDataColumnSidecarsByPeer(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("nominal", func(t *testing.T) {
const (
start, stop = 0, 15
blobCount = 1
)
p2p := testp2p.NewTestP2P(t)
// Setup test data and expectations
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
"peer1": roDataColumnSidecars[start:5],
"peer2": roDataColumnSidecars[5:9],
"peer3": roDataColumnSidecars[9:stop],
}
gs := startup.NewClockSynchronizer()
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
require.NoError(t, err)
require.Equal(t, stop-start, len(actual))
for i := range actual {
actualSidecar := actual[i]
index := actualSidecar.Index
expectedSidecar := expected[index]
require.DeepEqual(t, expectedSidecar, actualSidecar)
}
})
t.Run("one rogue peer", func(t *testing.T) {
const (
start, middle, stop = 0, 5, 15
blobCount = 1
)
p2p := testp2p.NewTestP2P(t)
// Setup test data and expectations
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
// Modify one sidecar to ensure proof verification fails.
if roDataColumnSidecars[middle].KzgProofs[0][0] == 0 {
roDataColumnSidecars[middle].KzgProofs[0][0]++
} else {
roDataColumnSidecars[middle].KzgProofs[0][0]--
}
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
"peer1": roDataColumnSidecars[start:middle],
"peer2": roDataColumnSidecars[5:middle],
"peer3": roDataColumnSidecars[middle:stop],
}
gs := startup.NewClockSynchronizer()
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
require.NoError(t, err)
require.Equal(t, middle-start, len(actual))
for i := range actual {
actualSidecar := actual[i]
index := actualSidecar.Index
expectedSidecar := expected[index]
require.DeepEqual(t, expectedSidecar, actualSidecar)
}
})
}
func TestComputeIndicesByRootByPeer(t *testing.T) {
peerIdStrs := []string{
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq", // Custodies 89, 94, 97 & 122
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf", // Custodies 1, 11, 37 & 86
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8", // Custodies 2, 37, 38 & 68
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx", // Custodies 10, 29, 36 & 108
}
headSlotByPeer := map[string]primitives.Slot{
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq": 89,
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf": 10,
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8": 12,
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx": 9,
}
p2p := testp2p.NewTestP2P(t)
peers := p2p.Peers()
peerIDs := make([]peer.ID, 0, len(peerIdStrs))
for _, peerIdStr := range peerIdStrs {
peerID, err := peer.Decode(peerIdStr)
require.NoError(t, err)
peers.SetChainState(peerID, &ethpb.StatusV2{
HeadSlot: headSlotByPeer[peerIdStr],
})
peerIDs = append(peerIDs, peerID)
}
slotByBlockRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
[fieldparams.RootLength]byte{1}: 8,
[fieldparams.RootLength]byte{2}: 10,
[fieldparams.RootLength]byte{3}: 9,
[fieldparams.RootLength]byte{4}: 50,
}
indicesByBlockRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
[fieldparams.RootLength]byte{1}: {3: true, 4: true, 5: true},
[fieldparams.RootLength]byte{2}: {1: true, 10: true, 37: true, 80: true},
[fieldparams.RootLength]byte{3}: {10: true, 38: true, 39: true, 40: true},
[fieldparams.RootLength]byte{4}: {89: true, 108: true, 122: true},
}
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peerIDs[0]: {
[fieldparams.RootLength]byte{4}: {89: true, 122: true},
},
peerIDs[1]: {
[fieldparams.RootLength]byte{2}: {1: true, 37: true},
},
peerIDs[2]: {
[fieldparams.RootLength]byte{2}: {37: true},
[fieldparams.RootLength]byte{3}: {38: true},
},
peerIDs[3]: {
[fieldparams.RootLength]byte{3}: {10: true},
},
}
peerIDsMap := make(map[peer.ID]bool, len(peerIDs))
for _, id := range peerIDs {
peerIDsMap[id] = true
}
actual, err := computeIndicesByRootByPeer(p2p, slotByBlockRoot, indicesByBlockRoot, peerIDsMap)
require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
for peer, indicesByRoot := range expected {
require.Equal(t, len(indicesByRoot), len(actual[peer]))
for root, indices := range indicesByRoot {
require.Equal(t, len(indices), len(actual[peer][root]))
for index := range indices {
require.Equal(t, actual[peer][root][index], true)
}
}
}
}
func TestRandomPeer(t *testing.T) {
// Fixed seed.
const seed = 42
randomSource := rand.NewGenerator()
t.Run("no peers", func(t *testing.T) {
pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
require.NotNil(t, err)
require.Equal(t, peer.ID(""), pid)
})
t.Run("context cancelled", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
cancel()
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{peer.ID("peer1"): {}}
pid, err := randomPeer(ctx, randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, indicesByRootByPeer)
require.NotNil(t, err)
require.Equal(t, peer.ID(""), pid)
})
t.Run("nominal", func(t *testing.T) {
const count = 1
collector := leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */)
peer1, peer2, peer3 := peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3")
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peer1: {},
peer2: {},
peer3: {},
}
pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
require.NoError(t, err)
require.Equal(t, true, map[peer.ID]bool{peer1: true, peer2: true, peer3: true}[pid])
})
}
func TestCopyIndicesByRootByPeer(t *testing.T) {
original := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peer.ID("peer1"): {
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
[fieldparams.RootLength]byte{2}: {2: true},
},
peer.ID("peer2"): {
[fieldparams.RootLength]byte{1}: {1: true},
},
}
copied := copyIndicesByRootByPeer(original)
require.Equal(t, len(original), len(copied))
for peer, indicesByRoot := range original {
require.Equal(t, len(indicesByRoot), len(copied[peer]))
for root, indices := range indicesByRoot {
require.Equal(t, len(indices), len(copied[peer][root]))
for index := range indices {
require.Equal(t, copied[peer][root][index], true)
}
}
}
}
func TestCompareIndices(t *testing.T) {
left := map[uint64]bool{3: true, 5: true, 7: true}
right := map[uint64]bool{5: true}
require.Equal(t, false, compareIndices(left, right))
left = map[uint64]bool{3: true, 5: true, 7: true}
right = map[uint64]bool{3: true, 6: true, 7: true}
require.Equal(t, false, compareIndices(left, right))
left = map[uint64]bool{3: true, 5: true, 7: true}
right = map[uint64]bool{5: true, 7: true, 3: true}
require.Equal(t, true, compareIndices(left, right))
}
func TestSortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{54: true, 23: true, 35: true}
expected := []uint64{23, 35, 54}
actual := sortedSliceFromMap(input)
require.DeepEqual(t, expected, actual)
}
func TestComputeSlotByBlockRoot(t *testing.T) {
const (
count = 3
multiplier = 10
)
roBlocks := make([]blocks.ROBlock, 0, count)
for i := range count {
signedBlock := util.NewBeaconBlock()
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
require.NoError(t, err)
roBlocks = append(roBlocks, roBlock)
}
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
}
actual := computeSlotByBlockRoot(roBlocks)
require.Equal(t, len(expected), len(actual))
for k, v := range expected {
require.Equal(t, v, actual[k])
}
}
func TestComputeTotalCount(t *testing.T) {
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
[fieldparams.RootLength]byte{2}: {2: true},
}
const expected = 3
actual := computeTotalCount(input)
require.Equal(t, expected, actual)
}

@@ -0,0 +1,936 @@
package sync
import (
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RequestDataColumnSidecarsByRoot is an opinionated, high level function which, for each data column in `dataColumnsToFetch`:
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
// - Request the data column sidecars from the selected peers.
// - In case some peers are unable to actually provide all the requested data columns, retry with other peers.
//
// This function:
// - returns on success when all the initially missing sidecars in `dataColumnsToFetch` are retrieved, or
// - returns an error if all peers in `peers` are exhausted and at least one data column sidecar is still missing.
//
// TODO: In case at least one column is still missing after peer exhaustion, but `peers`
// custody more than 64 columns, try to fetch enough columns to reconstruct the needed ones.
func RequestDataColumnSidecarsByRoot(
ctx context.Context,
dataColumnsToFetch []uint64,
block blocks.ROBlock,
peers []core.PeerID,
clock *startup.Clock,
p2p p2p.P2P,
ctxMap ContextByteVersions,
newColumnsVerifier verification.NewDataColumnsVerifier,
) ([]blocks.VerifiedRODataColumn, error) {
if len(dataColumnsToFetch) == 0 {
return nil, nil
}
// Assemble the peers who can provide the needed data columns.
dataColumnsByAdmissiblePeer, _, _, err := AdmissiblePeersForDataColumns(peers, dataColumnsToFetch, p2p)
if err != nil {
return nil, errors.Wrap(err, "couldn't get admissible peers for data columns")
}
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnsToFetch))
remainingMissingColumns := make(map[uint64]bool, len(dataColumnsToFetch))
for _, column := range dataColumnsToFetch {
remainingMissingColumns[column] = true
}
blockRoot := block.Root()
for len(dataColumnsByAdmissiblePeer) > 0 {
peersToFetchFrom, err := SelectPeersToFetchDataColumnsFrom(sliceFromMap(remainingMissingColumns, true /*sorted*/), dataColumnsByAdmissiblePeer)
if err != nil {
return nil, errors.Wrap(err, "select peers to fetch data columns from")
}
// Request the data columns from each peer.
successfulColumns := make(map[uint64]bool, len(remainingMissingColumns))
for peer, peerRequestedColumns := range peersToFetchFrom {
log := log.WithFields(logrus.Fields{"peer": peer.String(), "blockRoot": fmt.Sprintf("%#x", blockRoot)})
// Build the requests for the data columns.
byRootRequest := &eth.DataColumnsByRootIdentifier{BlockRoot: blockRoot[:], Columns: peerRequestedColumns}
// Send the requests to the peer.
params := DataColumnSidecarsParams{
Ctx: ctx,
Tor: clock,
P2P: p2p,
CtxMap: ctxMap,
}
peerSidecars, err := SendDataColumnSidecarsByRootRequest(params, peer, types.DataColumnsByRootIdentifiers{byRootRequest})
if err != nil {
// Remove this peer since it failed to respond correctly.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithFields(logrus.Fields{
"peer": peer.String(),
"blockRoot": fmt.Sprintf("%#x", block.Root()),
}).WithError(err).Debug("Failed to request data columns from peer")
continue
}
// Check if returned data columns align with the block.
if err := peerdas.DataColumnsAlignWithBlock(block, peerSidecars); err != nil {
// Remove this peer since it failed to respond correctly.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithError(err).Debug("Align with block failed")
continue
}
// Verify the received sidecars.
verifier := newColumnsVerifier(peerSidecars, verification.ByRootRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
// Remove this peer if the verification failed.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithError(err).Debug("Valid verification failed")
continue
}
if err := verifier.SidecarInclusionProven(); err != nil {
// Remove this peer if the verification failed.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithError(err).Debug("Sidecar inclusion proof verification failed")
continue
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
// Remove this peer if the verification failed.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithError(err).Debug("Sidecar KZG proof verification failed")
continue
}
// Upgrade the sidecars to verified sidecars.
verifiedPeerSidecars, err := verifier.VerifiedRODataColumns()
if err != nil {
// This should never happen.
return nil, errors.Wrap(err, "verified data columns")
}
// Mark columns as successful
for _, sidecar := range verifiedPeerSidecars {
successfulColumns[sidecar.Index] = true
}
// Check if all requested columns were successfully returned.
peerMissingColumns := make(map[uint64]bool)
for _, index := range peerRequestedColumns {
if !successfulColumns[index] {
peerMissingColumns[index] = true
}
}
if len(peerMissingColumns) > 0 {
// Remove this peer if some requested columns were not correctly returned.
delete(dataColumnsByAdmissiblePeer, peer)
log.WithField("missingColumns", sliceFromMap(peerMissingColumns, true /*sorted*/)).Debug("Peer did not provide all requested data columns")
}
verifiedSidecars = append(verifiedSidecars, verifiedPeerSidecars...)
}
// Update remaining columns for the next retry.
for col := range successfulColumns {
delete(remainingMissingColumns, col)
}
if len(remainingMissingColumns) > 0 {
// Some columns are still missing, retry with the remaining peers.
continue
}
return verifiedSidecars, nil
}
// If we still have remaining columns after all retries, return error
return nil, errors.Errorf("failed to retrieve all requested data columns after retries for block root=%#x, missing columns=%v", blockRoot, sliceFromMap(remainingMissingColumns, true /*sorted*/))
}
// RequestMissingDataColumnsByRange is an opinionated, high level function which, for each block in `blks`:
// - Computes all data column sidecars we should store and which are missing (according to our node ID and `groupCount`),
// - Builds an optimized set of data column sidecars by range requests, so that it never requests a data column that is already stored in the DB,
// and so that it minimizes the total number of requests, while never spanning more than `batchSize` slots per request.
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
// - Requests the data column sidecars from the selected peers.
// - Retries with other peers when some peers are unable to provide all the requested data columns.
//
// This function:
// - returns on success when all the initially missing sidecars for `blks` are retrieved, or
// - returns an error if no progress at all is made after 5 consecutive attempts.
// (If at least one additional data column sidecar is retrieved between two attempts, the counter is reset.)
//
// On success, the initially missing data columns, grouped by block root, are returned.
// This function expects blocks to be sorted by slot.
//
// TODO: If at least one column is still missing after all allowed retries, but `peers` custody more than 64 columns,
// try to fetch enough columns to reconstruct the needed ones.
func RequestMissingDataColumnsByRange(
ctx context.Context,
clock *startup.Clock,
ctxMap ContextByteVersions,
p2p p2p.P2P,
rateLimiter *leakybucket.Collector,
groupCount uint64,
dataColumnsStorage filesystem.DataColumnStorageReader,
blks []blocks.ROBlock,
batchSize int,
) (map[[fieldparams.RootLength]byte][]blocks.RODataColumn, error) {
const maxAllowedStall = 5 // Number of trials before giving up.
if len(blks) == 0 {
return nil, nil
}
// Get the current slot.
currentSlot := clock.CurrentSlot()
// Compute the minimum slot for which we should serve data columns.
minimumSlot, err := dataColumnsRPCMinValidSlot(currentSlot)
if err != nil {
return nil, errors.Wrap(err, "data columns RPC min valid slot")
}
// Get blocks by root and compute all missing columns by root.
blockByRoot := make(map[[fieldparams.RootLength]byte]blocks.ROBlock, len(blks))
missingColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(blks))
for _, blk := range blks {
// Extract the block root and the block slot
blockRoot, blockSlot := blk.Root(), blk.Block().Slot()
// Populate the block by root.
blockByRoot[blockRoot] = blk
// Skip blocks that are not in the retention period.
if blockSlot < minimumSlot {
continue
}
missingColumns, err := MissingDataColumns(blk, p2p.NodeID(), groupCount, dataColumnsStorage)
if err != nil {
return nil, errors.Wrap(err, "missing data columns")
}
for _, column := range missingColumns {
if _, ok := missingColumnsByRoot[blockRoot]; !ok {
missingColumnsByRoot[blockRoot] = make(map[uint64]bool)
}
missingColumnsByRoot[blockRoot][column] = true
}
}
// Return early if there are no missing data columns.
if len(missingColumnsByRoot) == 0 {
return nil, nil
}
// Compute the number of missing data columns.
previousMissingDataColumnsCount := itemsCount(missingColumnsByRoot)
// Count the number of retries for the same amount of missing data columns.
stallCount := 0
// Add log fields.
log := log.WithFields(logrus.Fields{
"initialMissingColumnsCount": previousMissingDataColumnsCount,
"blockCount": len(blks),
"firstSlot": blks[0].Block().Slot(),
"lastSlot": blks[len(blks)-1].Block().Slot(),
})
// Log the start of the process.
start := time.Now()
log.Debug("Requesting data column sidecars - start")
alignedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn, len(blks))
for len(missingColumnsByRoot) > 0 {
// Build requests.
requests, err := buildDataColumnByRangeRequests(blks, missingColumnsByRoot, batchSize)
if err != nil {
return nil, errors.Wrap(err, "build data column by range requests")
}
// Request data column sidecars from peers.
retrievedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn)
for _, request := range requests {
roDataColumns, err := fetchDataColumnsFromPeers(ctx, clock, p2p, rateLimiter, ctxMap, request)
if err != nil {
return nil, errors.Wrap(err, "fetch data columns from peers")
}
for _, roDataColumn := range roDataColumns {
root := roDataColumn.BlockRoot()
if _, ok := blockByRoot[root]; !ok {
// This may happen if the peer that sent the data columns is on a different fork.
continue
}
retrievedDataColumnsByRoot[root] = append(retrievedDataColumnsByRoot[root], roDataColumn)
}
}
for root, dataColumns := range retrievedDataColumnsByRoot {
// Retrieve the block from the root.
block, ok := blockByRoot[root]
if !ok {
return nil, errors.New("block not found - this should never happen")
}
// Check if the data columns align with blocks.
if err := peerdas.DataColumnsAlignWithBlock(block, dataColumns); err != nil {
log.WithField("root", root).WithError(err).Debug("Data columns do not align with block")
continue
}
alignedDataColumnsByRoot[root] = append(alignedDataColumnsByRoot[root], dataColumns...)
// Remove aligned data columns from the missing columns.
for _, dataColumn := range dataColumns {
delete(missingColumnsByRoot[root], dataColumn.Index)
if len(missingColumnsByRoot[root]) == 0 {
delete(missingColumnsByRoot, root)
}
}
}
missingDataColumnsCount := itemsCount(missingColumnsByRoot)
if missingDataColumnsCount == previousMissingDataColumnsCount {
stallCount++
} else {
stallCount = 0
}
previousMissingDataColumnsCount = missingDataColumnsCount
if missingDataColumnsCount > 0 {
log := log.WithFields(logrus.Fields{
"remainingMissingColumnsCount": missingDataColumnsCount,
"stallCount": stallCount,
"maxAllowedStall": maxAllowedStall,
})
if stallCount >= maxAllowedStall {
// It is very likely `blks` contains orphaned blocks, for which no peer has the data columns.
// We give up and let the state machine handle the situation.
const message = "Requesting data column sidecars - no progress, giving up"
log.Warning(message)
return nil, errors.New(message)
}
log.Debug("Requesting data column sidecars - continue")
}
}
log.WithField("duration", time.Since(start)).Debug("Requesting data column sidecars - success")
return alignedDataColumnsByRoot, nil
}
// MissingDataColumns computes the data columns we should store for a given block according to `custodyGroupCount`,
// and returns the indices of those that are missing from storage.
func MissingDataColumns(block blocks.ROBlock, nodeID enode.ID, custodyGroupCount uint64, dataColumnStorage filesystem.DataColumnStorageReader) ([]uint64, error) {
// Blocks before Fulu have no data columns.
if block.Version() < version.Fulu {
return nil, nil
}
// Get the blob commitments from the block.
commitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Nothing to build if there are no commitments.
if len(commitments) == 0 {
return nil, nil
}
// Compute the expected columns.
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
expectedColumns := peerInfo.CustodyColumns
// Get the stored columns.
numberOfColumns := params.BeaconConfig().NumberOfColumns
summary := dataColumnStorage.Summary(block.Root())
storedColumns := make(map[uint64]bool, numberOfColumns)
for i := range numberOfColumns {
if summary.HasIndex(i) {
storedColumns[i] = true
}
}
// Compute the missing columns.
missingColumns := make([]uint64, 0, len(expectedColumns))
for column := range expectedColumns {
if !storedColumns[column] {
missingColumns = append(missingColumns, column)
}
}
return missingColumns, nil
}
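// Illustration (hypothetical values, editor's sketch): if our custody info says we should
// store columns {4, 37, 70, 103} for this block and the storage summary only reports
// columns 4 and 70 as present, MissingDataColumns returns [37 103] (the order follows map
// iteration and is therefore not deterministic).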
// SelectPeersToFetchDataColumnsFrom implements a greedy algorithm to select peers to fetch data columns from.
// https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm
func SelectPeersToFetchDataColumnsFrom(neededDataColumns []uint64, dataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID][]uint64, error) {
// Copy the provided needed data columns into a set that we will remove elements from.
remainingDataColumns := make(map[uint64]bool, len(neededDataColumns))
for _, dataColumn := range neededDataColumns {
remainingDataColumns[dataColumn] = true
}
dataColumnsFromSelectedPeers := make(map[peer.ID][]uint64)
// Filter `dataColumnsByPeer` to only contain needed data columns.
neededDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(dataColumnsByPeer))
for pid, dataColumns := range dataColumnsByPeer {
for dataColumn := range dataColumns {
if remainingDataColumns[dataColumn] {
if _, ok := neededDataColumnsByPeer[pid]; !ok {
neededDataColumnsByPeer[pid] = make(map[uint64]bool, len(neededDataColumns))
}
neededDataColumnsByPeer[pid][dataColumn] = true
}
}
}
maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
for len(remainingDataColumns) > 0 {
// Check if at least one peer remains. If not, it means that we don't have enough peers to fetch all needed data columns.
if len(neededDataColumnsByPeer) == 0 {
missingDataColumnsSortedSlice := sliceFromMap(remainingDataColumns, true /*sorted*/)
return dataColumnsFromSelectedPeers, errors.Errorf("no peer to fetch the following data columns: %v", missingDataColumnsSortedSlice)
}
// Select the peer that custodies the most needed data columns (greedy selection).
var bestPeer peer.ID
for peer, dataColumns := range neededDataColumnsByPeer {
if len(dataColumns) > len(neededDataColumnsByPeer[bestPeer]) {
bestPeer = peer
}
}
dataColumnsSortedSlice := sliceFromMap(neededDataColumnsByPeer[bestPeer], true /*sorted*/)
if uint64(len(dataColumnsSortedSlice)) > maxRequestDataColumnSidecars {
dataColumnsSortedSlice = dataColumnsSortedSlice[:maxRequestDataColumnSidecars]
}
dataColumnsFromSelectedPeers[bestPeer] = dataColumnsSortedSlice
// Remove the selected peer from the list of peers.
delete(neededDataColumnsByPeer, bestPeer)
// Remove the selected peer's data columns from the list of remaining data columns.
for _, dataColumn := range dataColumnsSortedSlice {
delete(remainingDataColumns, dataColumn)
}
// Remove the selected peer's data columns from the list of needed data columns by peer.
for _, dataColumn := range dataColumnsSortedSlice {
for peer, dataColumns := range neededDataColumnsByPeer {
delete(dataColumns, dataColumn)
if len(dataColumns) == 0 {
delete(neededDataColumnsByPeer, peer)
}
}
}
}
return dataColumnsFromSelectedPeers, nil
}
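// exampleGreedyPeerSelection is an illustrative sketch (hypothetical peer IDs and column
// indices, not taken from real data) showing the greedy set-cover behaviour of
// SelectPeersToFetchDataColumnsFrom on a toy input.
func exampleGreedyPeerSelection() (map[peer.ID][]uint64, error) {
	neededDataColumns := []uint64{1, 2, 3, 4}
	dataColumnsByPeer := map[peer.ID]map[uint64]bool{
		"peerA": {1: true, 2: true, 3: true}, // Covers the most needed columns, selected first.
		"peerB": {3: true, 4: true},          // Covers the remaining column 4, selected second.
		"peerC": {5: true},                   // Custodies no needed column, never selected.
	}
	// Expected result: {"peerA": [1 2 3], "peerB": [4]}.
	return SelectPeersToFetchDataColumnsFrom(neededDataColumns, dataColumnsByPeer)
}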
// AdmissiblePeersForDataColumns returns the peers that custody at least one data column listed in `neededDataColumns`.
//
// It returns:
// - A map, where the key is the peer and the value is the set of data columns custodied by the peer.
// - A map, where the key is the data column and the value is the list of peers that custody the column.
// - A slice of descriptions for non-admissible peers.
// - An error if any.
//
// NOTE: distributeSamplesToPeer from the DataColumnSampler implements similar logic,
// but with only one column queried in each request.
func AdmissiblePeersForDataColumns(
peers []peer.ID,
neededDataColumns []uint64,
p2p p2p.P2P,
) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) {
peerCount := len(peers)
neededDataColumnsCount := uint64(len(neededDataColumns))
// Create a description slice for non-admissible peers.
descriptions := make([]string, 0, peerCount)
// Compute custody columns for each peer.
dataColumnsByPeer, err := custodyColumnsFromPeers(p2p, peers)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "custody columns from peers")
}
// Filter peers which custody at least one needed data column.
dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeer)
descriptions = append(descriptions, localDescriptions...)
// Compute a map from needed data columns to their peers.
admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount)
for peerId, peerDataColumns := range dataColumnsByAdmissiblePeer {
for _, dataColumn := range neededDataColumns {
if peerDataColumns[dataColumn] {
admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peerId)
}
}
}
return dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, nil
}
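// Illustration (hypothetical peers and columns, editor's sketch): with neededDataColumns = [3, 7],
// a peerA custodying columns {3, 7} and a peerB custodying columns {7, 9}, the function returns:
// - dataColumnsByAdmissiblePeer = {peerA: {3, 7}, peerB: {7, 9}} (each admissible peer keeps its full custody set),
// - admissiblePeersByDataColumn = {3: [peerA], 7: [peerA, peerB]} (peer order is not deterministic).
// A peer custodying none of the needed columns is dropped and only reported in the descriptions.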
// custodyColumnsFromPeers computes all the custody columns indexed by peer.
func custodyColumnsFromPeers(p2pIface p2p.P2P, peers []peer.ID) (map[peer.ID]map[uint64]bool, error) {
peerCount := len(peers)
custodyColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount)
for _, peer := range peers {
// Get the node ID from the peer ID.
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrap(err, "convert peer ID to node ID")
}
// Get the custody group count of the peer.
custodyGroupCount := p2pIface.CustodyGroupCountFromPeer(peer)
// Get peerdas info of the peer.
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrap(err, "peerdas info")
}
custodyColumnsByPeer[peer] = dasInfo.CustodyColumns
}
return custodyColumnsByPeer, nil
}
// `filterPeerWhichCustodyAtLeastOneDataColumn` filters peers which custody at least one data column
// specified in `neededDataColumns`. It also returns a list of descriptions for the non-admissible peers.
func filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns []uint64, inputDataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID]map[uint64]bool, []string) {
// `numberOfColumns` is used to pretty-print peer custody columns in the descriptions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
outputDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(inputDataColumnsByPeer))
descriptions := make([]string, 0)
outerLoop:
for peer, peerCustodyDataColumns := range inputDataColumnsByPeer {
for _, neededDataColumn := range neededDataColumns {
if peerCustodyDataColumns[neededDataColumn] {
outputDataColumnsByPeer[peer] = peerCustodyDataColumns
continue outerLoop
}
}
peerCustodyColumnsCount := uint64(len(peerCustodyDataColumns))
var peerCustodyColumnsLog interface{} = "all"
if peerCustodyColumnsCount < numberOfColumns {
peerCustodyColumnsLog = sliceFromMap(peerCustodyDataColumns, true /*sorted*/)
}
description := fmt.Sprintf("peer %s: does not custody any needed column, custody columns: %v", peer, peerCustodyColumnsLog)
descriptions = append(descriptions, description)
}
return outputDataColumnsByPeer, descriptions
}
// buildDataColumnByRangeRequests builds an optimized slice of data column by range requests:
// 1. It will never request a data column that is already stored in the DB if there is no "hole" in `roBlocks` other than missed slots.
// 2. It will minimize the number of requests.
// It expects blocks to be sorted by slot.
func buildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool, batchSize int) ([]*eth.DataColumnSidecarsByRangeRequest, error) {
batchSizeSlot := primitives.Slot(batchSize)
// Return early if there are no blocks to process.
if len(roBlocks) == 0 {
return nil, nil
}
// It's safe to get the first item of the slice since we've already checked that it's not empty.
firstROBlock, lastROBlock := roBlocks[0], roBlocks[len(roBlocks)-1]
firstBlockSlot, lastBlockSlot := firstROBlock.Block().Slot(), lastROBlock.Block().Slot()
firstBlockRoot := firstROBlock.Root()
previousMissingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[firstBlockRoot]))
if missing, ok := missingColumnsByRoot[firstBlockRoot]; ok {
for key, value := range missing {
previousMissingDataColumns[key] = value
}
}
previousBlockSlot, previousStartBlockSlot := firstBlockSlot, firstBlockSlot
result := make([]*eth.DataColumnSidecarsByRangeRequest, 0, 1)
for index := 1; index < len(roBlocks); index++ {
roBlock := roBlocks[index]
// Extract the block from the RO-block.
block := roBlock.Block()
// Extract the slot from the block.
blockRoot, blockSlot := roBlock.Root(), block.Slot()
if blockSlot <= previousBlockSlot {
return nil, errors.Errorf("blocks are not strictly sorted by slot. Previous block slot: %d, current block slot: %d", previousBlockSlot, blockSlot)
}
// Extract the KZG commitments from the current block body.
blockKzgCommitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Compute the count of KZG commitments.
blockKzgCommitmentCount := len(blockKzgCommitments)
// Skip blocks without commitments.
if blockKzgCommitmentCount == 0 {
previousBlockSlot = blockSlot
continue
}
// Get the missing data columns for the current block.
missingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[blockRoot]))
for key, value := range missingColumnsByRoot[blockRoot] {
missingDataColumns[key] = value
}
// Compute if the missing data columns differ.
missingDataColumnsDiffer := uint64MapDiffer(previousMissingDataColumns, missingDataColumns)
// Compute if the batch size is reached.
batchSizeReached := blockSlot-previousStartBlockSlot >= batchSizeSlot
if missingDataColumnsDiffer || batchSizeReached {
// Append the request to the result.
request := &eth.DataColumnSidecarsByRangeRequest{
StartSlot: previousStartBlockSlot,
Count: uint64(blockSlot - previousStartBlockSlot),
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
}
result = append(result, request)
previousStartBlockSlot, previousMissingDataColumns = blockSlot, missingDataColumns
}
previousBlockSlot = blockSlot
}
lastRequest := &eth.DataColumnSidecarsByRangeRequest{
StartSlot: previousStartBlockSlot,
Count: uint64(lastBlockSlot - previousStartBlockSlot + 1),
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
}
result = append(result, lastRequest)
return result, nil
}
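// Illustration (hypothetical slots and columns, editor's sketch): for blocks at slots 10..14
// that all miss columns {0, 64}, and a `batchSize` larger than the slot span, a single request
// {StartSlot: 10, Count: 5, Columns: [0 64]} is produced. A block whose missing column set
// differs from the previous one, or a span reaching `batchSize` slots, closes the current
// request and starts a new one at that block's slot.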
// fetchDataColumnsFromPeers requests data columns by range from relevant peers.
func fetchDataColumnsFromPeers(
ctx context.Context,
clock *startup.Clock,
p2p p2p.P2P,
rateLimiter *leakybucket.Collector,
ctxMap ContextByteVersions,
targetRequest *eth.DataColumnSidecarsByRangeRequest,
) ([]blocks.RODataColumn, error) {
// Filter out requests with no data columns.
if len(targetRequest.Columns) == 0 {
return nil, nil
}
// Get all admissible peers with the data columns they custody.
dataColumnsByAdmissiblePeer, err := waitForPeersForDataColumns(p2p, rateLimiter, targetRequest)
if err != nil {
return nil, errors.Wrap(err, "wait for peers for data columns")
}
// Select the peers that will be requested.
dataColumnsToFetchByPeer, err := SelectPeersToFetchDataColumnsFrom(targetRequest.Columns, dataColumnsByAdmissiblePeer)
if err != nil {
// This should never happen.
return nil, errors.Wrap(err, "select peers to fetch data columns from")
}
var roDataColumns []blocks.RODataColumn
for peer, columnsToFetch := range dataColumnsToFetchByPeer {
// Build the request.
request := &eth.DataColumnSidecarsByRangeRequest{
StartSlot: targetRequest.StartSlot,
Count: targetRequest.Count,
Columns: columnsToFetch,
}
params := DataColumnSidecarsParams{
Ctx: ctx,
Tor: clock,
P2P: p2p,
CtxMap: ctxMap,
}
peerRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peer, request)
if err != nil {
return nil, errors.Wrap(err, "send data column sidecars by range request")
}
roDataColumns = append(roDataColumns, peerRoDataColumns...)
}
return roDataColumns, nil
}
// waitForPeersForDataColumns returns a map where the key is the peer and the value is the custody columns of the peer.
// It uses only peers that:
// - are synced up to the last slot of the request, and
// - have enough bandwidth to serve `request.Count` blocks.
// It waits until at least one peer per needed data column is available.
func waitForPeersForDataColumns(p2p p2p.P2P, rateLimiter *leakybucket.Collector, request *eth.DataColumnSidecarsByRangeRequest) (map[peer.ID]map[uint64]bool, error) {
const delay = 5 * time.Second
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Build nice log fields.
lastSlot := request.StartSlot.Add(request.Count).Sub(1)
var neededDataColumnsLog interface{} = "all"
neededDataColumnCount := uint64(len(request.Columns))
if neededDataColumnCount < numberOfColumns {
neededDataColumnsLog = request.Columns
}
log := log.WithFields(logrus.Fields{
"start": request.StartSlot,
"targetSlot": lastSlot,
"neededDataColumns": neededDataColumnsLog,
})
// Keep only peers with head epoch greater than or equal to the epoch corresponding to the target slot, and
// keep only peers with enough bandwidth.
filteredPeers, descriptions, err := filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
if err != nil {
return nil, errors.Wrap(err, "filter eers by target slot and bandwidth")
}
// Get the peers that are admissible for the data columns.
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err := AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
if err != nil {
return nil, errors.Wrap(err, "admissible peers for data columns")
}
descriptions = append(descriptions, moreDescriptions...)
// Compute data columns without any peer.
dataColumnsWithoutPeers := computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
// Wait if no suitable peers are available.
for len(dataColumnsWithoutPeers) > 0 {
// Build nice log fields.
var dataColumnsWithoutPeersLog interface{} = "all"
dataColumnsWithoutPeersCount := uint64(len(dataColumnsWithoutPeers))
if dataColumnsWithoutPeersCount < numberOfColumns {
dataColumnsWithoutPeersLog = sliceFromMap(dataColumnsWithoutPeers, true /*sorted*/)
}
log.WithField("columnsWithoutPeer", dataColumnsWithoutPeersLog).Warning("Fetch data columns from peers - no available peers, retrying later")
for _, description := range descriptions {
log.Debug(description)
}
for pid, peerDataColumns := range dataColumnsByAdmissiblePeer {
var peerDataColumnsLog interface{} = "all"
peerDataColumnsCount := uint64(len(peerDataColumns))
if peerDataColumnsCount < numberOfColumns {
peerDataColumnsLog = sliceFromMap(peerDataColumns, true /*sorted*/)
}
log.WithFields(logrus.Fields{
"peer": pid,
"peerDataColumns": peerDataColumnsLog,
}).Debug("Peer data columns")
}
time.Sleep(delay)
// Filter for peers with head epoch greater than or equal to our target epoch for ByRange requests.
filteredPeers, descriptions, err = filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
if err != nil {
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
}
// Get the peers that are admissible for the data columns.
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err = AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
if err != nil {
return nil, errors.Wrap(err, "admissible peers for data columns")
}
descriptions = append(descriptions, moreDescriptions...)
// Compute data columns without any peer.
dataColumnsWithoutPeers = computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
}
return dataColumnsByAdmissiblePeer, nil
}
// Filter peers to ensure they are synced to the target slot and have sufficient bandwidth to serve the request.
func filterPeersByTargetSlotAndBandwidth(p2p p2p.P2P, rateLimiter *leakybucket.Collector, lastSlot primitives.Slot, blockCount uint64) ([]peer.ID, []string, error) {
peers := p2p.Peers().Connected()
slotPeers, descriptions, err := filterPeersByTargetSlot(p2p, peers, lastSlot)
if err != nil {
return nil, nil, errors.Wrap(err, "peers with slot and data columns")
}
// Filter for peers with sufficient bandwidth to serve the request.
slotAndBandwidthPeers := hasSufficientBandwidth(rateLimiter, slotPeers, blockCount)
// Add debugging logs for the filtered peers.
peerWithSufficientBandwidthMap := make(map[peer.ID]bool, len(peers))
for _, peer := range slotAndBandwidthPeers {
peerWithSufficientBandwidthMap[peer] = true
}
for _, peer := range slotPeers {
if !peerWithSufficientBandwidthMap[peer] {
description := fmt.Sprintf("peer %s: does not have sufficient bandwidth", peer)
descriptions = append(descriptions, description)
}
}
return slotAndBandwidthPeers, descriptions, nil
}
func hasSufficientBandwidth(rateLimiter *leakybucket.Collector, peers []peer.ID, count uint64) []peer.ID {
var filteredPeers []peer.ID
for _, p := range peers {
if uint64(rateLimiter.Remaining(p.String())) < count {
continue
}
copiedP := p
filteredPeers = append(filteredPeers, copiedP)
}
return filteredPeers
}
func computeDataColumnsWithoutPeers(neededColumns []uint64, peersByColumn map[uint64][]peer.ID) map[uint64]bool {
result := make(map[uint64]bool)
for _, column := range neededColumns {
if _, ok := peersByColumn[column]; !ok {
result[column] = true
}
}
return result
}
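// Illustration (hypothetical values, editor's sketch): with neededColumns = [1, 2, 3] and
// peersByColumn containing entries for columns 1 and 3 only, the result is {2: true}.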
// filterPeersByTargetSlot filters out peers whose head epoch is lower than our target epoch for ByRange requests.
func filterPeersByTargetSlot(p2p p2p.P2P, peers []peer.ID, targetSlot primitives.Slot) ([]peer.ID, []string, error) {
filteredPeers := make([]peer.ID, 0, len(peers))
descriptions := make([]string, 0, len(peers))
// Compute the target epoch from the target slot.
targetEpoch := slots.ToEpoch(targetSlot)
for _, peer := range peers {
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
description := fmt.Sprintf("peer %s: error: %s", peer, err)
descriptions = append(descriptions, description)
continue
}
if peerChainState == nil {
description := fmt.Sprintf("peer %s: chain state is nil", peer)
descriptions = append(descriptions, description)
continue
}
peerHeadEpoch := slots.ToEpoch(peerChainState.HeadSlot)
if peerHeadEpoch < targetEpoch {
description := fmt.Sprintf("peer %s: peer head epoch %d < our target epoch %d", peer, peerHeadEpoch, targetEpoch)
descriptions = append(descriptions, description)
continue
}
filteredPeers = append(filteredPeers, peer)
}
return filteredPeers, descriptions, nil
}
// itemsCount returns the total number of missing data columns across all block roots.
func itemsCount(missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) int {
count := 0
for _, columns := range missingColumnsByRoot {
count += len(columns)
}
return count
}
// uint64MapDiffer returns true if the two maps differ.
func uint64MapDiffer(left, right map[uint64]bool) bool {
if len(left) != len(right) {
return true
}
for k := range left {
if !right[k] {
return true
}
}
return false
}

File diff suppressed because it is too large.


@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
@@ -72,7 +73,9 @@ go_test(
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
@@ -89,6 +92,7 @@ go_test(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",


@@ -3,11 +3,13 @@ package initialsync
import (
"context"
"fmt"
"slices"
"sort"
"strings"
"sync"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -15,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -34,7 +37,6 @@ import (
)
const (
// maxPendingRequests limits how many concurrent fetch request one can initiate.
maxPendingRequests = 64
// peersPercentagePerRequest caps percentage of peers to be used in a request.
@@ -78,6 +80,8 @@ type blocksFetcherConfig struct {
peerFilterCapacityWeight float64
mode syncMode
bs filesystem.BlobStorageSummarizer
dcs filesystem.DataColumnStorageReader
cv verification.NewDataColumnsVerifier
}
// blocksFetcher is a service to fetch chain data from peers.
@@ -94,6 +98,8 @@ type blocksFetcher struct {
p2p p2p.P2P
db db.ReadOnlyDatabase
bs filesystem.BlobStorageSummarizer
dcs filesystem.DataColumnStorageReader
cv verification.NewDataColumnsVerifier
blocksPerPeriod uint64
rateLimiter *leakybucket.Collector
peerLocks map[peer.ID]*peerLock
@@ -124,7 +130,7 @@ type fetchRequestResponse struct {
blobsFrom peer.ID
start primitives.Slot
count uint64
bwb []blocks.BlockWithROBlobs
bwb []blocks.BlockWithROSidecars
err error
}
@@ -162,6 +168,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
p2p: cfg.p2p,
db: cfg.db,
bs: cfg.bs,
dcs: cfg.dcs,
cv: cfg.cv,
blocksPerPeriod: uint64(blocksPerPeriod),
rateLimiter: rateLimiter,
peerLocks: make(map[peer.ID]*peerLock),
@@ -298,7 +306,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
response := &fetchRequestResponse{
start: start,
count: count,
bwb: []blocks.BlockWithROBlobs{},
bwb: []blocks.BlockWithROSidecars{},
err: nil,
}
@@ -317,30 +325,114 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
if f.mode == modeStopOnFinalizedEpoch {
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
if start > highestFinalizedSlot {
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
errSlotIsTooHigh, start, highestFinalizedSlot)
response.err = fmt.Errorf(
"%w, slot: %d, highest finalized slot: %d",
errSlotIsTooHigh, start, highestFinalizedSlot,
)
return response
}
}
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
if response.err == nil {
pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
if err != nil {
log.WithError(err).Error("Failed to fetch sidecars")
response.err = err
}
response.bwb = bwb
response.blobsFrom = pid
}
return response
}
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
// fetchSidecars fetches sidecars corresponding to the blocks in `bwScs`.
// It mutates the `Blobs` and `Columns` fields of `bwScs` with the fetched sidecars.
// `pid` is the initial peer to request blobs from (usually the peer from which the blocks originated),
// `peers` is a list of peers to use for the requests if `pid` fails.
// `bwScs` must be sorted by slot.
// It returns the peer ID from which blobs were fetched (if any).
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
if len(bwScs) == 0 {
return "", nil
}
firstFuluIndex, err := findFirstFuluIndex(bwScs)
if err != nil {
return "", errors.Wrap(err, "find first Fulu index")
}
preFulu := bwScs[:firstFuluIndex]
postFulu := bwScs[firstFuluIndex:]
var blobsPid peer.ID
if len(preFulu) > 0 {
// Fetch blob sidecars.
blobsPid, err = f.fetchBlobsFromPeer(ctx, preFulu, pid, peers)
if err != nil {
return "", errors.Wrap(err, "fetch blobs from peer")
}
}
if len(postFulu) == 0 {
return blobsPid, nil
}
// Compute the columns to request.
custodyGroupCount, err := f.p2p.CustodyGroupCount()
if err != nil {
return blobsPid, errors.Wrap(err, "custody group count")
}
samplingSize := max(custodyGroupCount, samplesPerSlot)
info, _, err := peerdas.Info(f.p2p.NodeID(), samplingSize)
if err != nil {
return blobsPid, errors.Wrap(err, "custody info")
}
params := prysmsync.DataColumnSidecarsParams{
Ctx: ctx,
Tor: f.clock,
P2P: f.p2p,
RateLimiter: f.rateLimiter,
CtxMap: f.ctxMap,
Storage: f.dcs,
NewVerifier: f.cv,
}
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
for _, block := range postFulu {
roBlocks = append(roBlocks, block.Block)
}
verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
if err != nil {
return "", errors.Wrap(err, "fetch data column sidecars")
}
// Populate the response.
for i := range bwScs {
bwSc := &bwScs[i]
root := bwSc.Block.Root()
if columns, ok := verifiedRoDataColumnsByRoot[root]; ok {
bwSc.Columns = columns
}
}
return blobsPid, nil
}
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
func (f *blocksFetcher) fetchBlocksFromPeer(
ctx context.Context,
start primitives.Slot, count uint64,
peers []peer.ID,
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
defer span.End()
@@ -355,8 +447,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
// peers are dialed first.
peers = append(bestPeers, peers...)
peers = dedupPeers(peers)
for i := 0; i < len(peers); i++ {
p := peers[i]
for _, p := range peers {
blocks, err := f.requestBlocks(ctx, req, p)
if err != nil {
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
@@ -380,14 +471,14 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
return nil, "", errNoPeersAvailable
}
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
rb := make([]blocks.BlockWithROBlobs, len(bs))
for i, b := range bs {
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
rb := make([]blocks.BlockWithROSidecars, len(blks))
for i, b := range blks {
ro, err := blocks.NewROBlock(b)
if err != nil {
return nil, err
}
rb[i] = blocks.BlockWithROBlobs{Block: ro}
rb[i] = blocks.BlockWithROSidecars{Block: ro}
}
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
return rb, nil
@@ -403,7 +494,8 @@ type commitmentCountList []commitmentCount
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
// This gives us a representation to finish building the request that is lightweight and readable for testing.
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
// `bwb` must be sorted by slot.
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
if len(bwb) == 0 {
return nil
}
@@ -485,7 +577,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
// verifyAndPopulateBlobs mutates the input `bwb` argument by adding verified blobs.
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
for i := range blobs {
if blobs[i].Slot() < req.StartSlot {
@@ -495,46 +589,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
}
for i := range bwb {
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
if err != nil {
if errors.Is(err, errDidntPopulate) {
continue
}
return bwb, err
return err
}
bwb[i] = bwi
}
return bwb, nil
return nil
}
var errDidntPopulate = errors.New("skipping population of block")
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
// populateBlock verifies and populates blobs for a block.
// This function mutates the input `bw` argument.
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
blk := bw.Block
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
return bw, errDidntPopulate
return errDidntPopulate
}
commits, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return bw, errDidntPopulate
return errDidntPopulate
}
if len(commits) == 0 {
return bw, errDidntPopulate
return errDidntPopulate
}
// Drop blobs on the floor if we already have them.
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
return bw, errDidntPopulate
return errDidntPopulate
}
if len(commits) != len(blobs) {
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
}
for ci := range commits {
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
return bw, err
return err
}
}
bw.Blobs = blobs
return bw, nil
return nil
}
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
@@ -547,29 +648,38 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
}
// fetchBlobsFromPeer fetches blobs from a single randomly selected peer.
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
// This function mutates the input `bwb` argument.
// `pid` is the initial peer to request blobs from (usually the peer from which the block originated),
// `peers` is a list of peers to use for the request if `pid` fails.
// `bwb` must be sorted by slot.
// It returns the peer ID from which blobs were fetched.
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
if len(bwb) == 0 {
return "", nil
}
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
defer span.End()
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
return "", bwb, nil
return "", nil
}
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
if err != nil {
return "", nil, err
return "", err
}
// Construct request message based on observed interval of blocks in need of blobs.
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
if req == nil {
return "", bwb, nil
return "", nil
}
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
// We dial the initial peer first to ensure that we get the desired set of blobs.
wantedPeers := append([]peer.ID{pid}, peers...)
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
peers = append([]peer.ID{pid}, peers...)
peers = f.hasSufficientBandwidth(peers, req.Count)
// We append the best peers to the front so that higher capacity
// peers are dialed first. If all of them fail, we fall back to the
// initial peer we wanted to request blobs from.
peers = append(bestPeers, pid)
peers = append(peers, pid)
for i := 0; i < len(peers); i++ {
p := peers[i]
blobs, err := f.requestBlobs(ctx, req, p)
@@ -578,14 +688,24 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
continue
}
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
if err != nil {
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
continue
}
return p, robs, err
return p, err
}
return "", nil, errNoPeersAvailable
return "", errNoPeersAvailable
}
// sortedSliceFromMap returns a sorted slice of keys from a map.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
result := make([]uint64, 0, len(m))
for k := range m {
result = append(result, k)
}
slices.Sort(result)
return result
}
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
@@ -642,6 +762,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
}
f.rateLimiter.Add(pid.String(), int64(req.Count))
l.Unlock()
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
}
@@ -699,13 +820,17 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
}
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
filteredPeers := []peer.ID{}
for _, p := range peers {
if uint64(f.rateLimiter.Remaining(p.String())) < count {
filteredPeers := make([]peer.ID, 0, len(peers))
for _, peer := range peers {
remaining := uint64(0)
if remainingInt := f.rateLimiter.Remaining(peer.String()); remainingInt > 0 {
remaining = uint64(remainingInt)
}
if remaining < count {
continue
}
copiedP := p
filteredPeers = append(filteredPeers, copiedP)
filteredPeers = append(filteredPeers, peer)
}
return filteredPeers
}
@@ -745,3 +870,23 @@ func dedupPeers(peers []peer.ID) []peer.ID {
}
return newPeerList
}
// findFirstFuluIndex returns the index of the first block with a version >= Fulu.
// It returns an error if the blocks are not sorted by version with respect to Fulu.
func findFirstFuluIndex(bwScs []blocks.BlockWithROSidecars) (int, error) {
firstFuluIndex := len(bwScs)
for i, bwSc := range bwScs {
blockVersion := bwSc.Block.Version()
if blockVersion >= version.Fulu && firstFuluIndex > i {
firstFuluIndex = i
continue
}
if blockVersion < version.Fulu && firstFuluIndex <= i {
return 0, errors.New("blocks are not sorted by version")
}
}
return firstFuluIndex, nil
}


@@ -12,11 +12,12 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -266,7 +267,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
p := p2pt.NewTestP2P(t)
p := p2ptest.NewTestP2P(t)
connectPeers(t, p, tt.peers, p.Peers())
cache.RLock()
genesisRoot := cache.rootCache[0]
@@ -307,9 +308,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
fetcher.stop()
}()
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
processFetchedBlocks := func() ([]blocks.BlockWithROSidecars, error) {
defer cancel()
var unionRespBlocks []blocks.BlockWithROBlobs
var unionRespBlocks []blocks.BlockWithROSidecars
for {
select {
@@ -398,6 +399,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit))
})
}
func TestBlocksFetcher_handleRequest(t *testing.T) {
blockBatchLimit := flags.Get().BlockBatchLimit
chainConfig := struct {
@@ -455,7 +457,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
}
}()
var bwb []blocks.BlockWithROBlobs
var bwb []blocks.BlockWithROSidecars
select {
case <-ctx.Done():
t.Error(ctx.Err())
@@ -531,9 +533,9 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
}
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
p1 := p2pt.NewTestP2P(t)
p2 := p2pt.NewTestP2P(t)
p3 := p2pt.NewTestP2P(t)
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p3 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
p1.Connect(p3)
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
@@ -543,7 +545,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
Count: 64,
}
topic := p2pm.RPCBlocksByRangeTopicV1
topic := p2p.RPCBlocksByRangeTopicV1
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
streamHandlerFn := func(stream network.Stream) {
assert.NoError(t, stream.Close())
@@ -602,15 +604,15 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
}
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
p1 := p2pt.NewTestP2P(t)
p2 := p2pt.NewTestP2P(t)
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
req := &ethpb.BeaconBlocksByRangeRequest{
Count: 64,
}
topic := p2pm.RPCBlocksByRangeTopicV1
topic := p2p.RPCBlocksByRangeTopicV1
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
streamHandlerFn := func(stream network.Stream) {
assert.NoError(t, stream.Close())
@@ -638,7 +640,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
}
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
p1 := p2pt.NewTestP2P(t)
p1 := p2ptest.NewTestP2P(t)
tests := []struct {
name string
req *ethpb.BeaconBlocksByRangeRequest
@@ -883,7 +885,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
},
}
topic := p2pm.RPCBlocksByRangeTopicV1
topic := p2p.RPCBlocksByRangeTopicV1
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
ctx, cancel := context.WithCancel(t.Context())
@@ -893,7 +895,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p2 := p2pt.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
@@ -993,7 +995,7 @@ func TestBlobRangeForBlocks(t *testing.T) {
func TestBlobRequest(t *testing.T) {
var nilReq *ethpb.BlobSidecarsByRangeRequest
// no blocks
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
req := countCommitments([]blocks.BlockWithROSidecars{}, 0).blobRange(nil).Request()
require.Equal(t, nilReq, req)
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
@@ -1026,22 +1028,16 @@ func TestBlobRequest(t *testing.T) {
}
func TestCountCommitments(t *testing.T) {
// no blocks
// blocks before retention start filtered
// blocks without commitments filtered
// pre-deneb filtered
// variety of commitment counts are accurate, from 1 to max
type testcase struct {
name string
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
numBlocks int
retStart primitives.Slot
resCount int
name string
bwb func(t *testing.T, c testcase) []blocks.BlockWithROSidecars
retStart primitives.Slot
resCount int
}
cases := []testcase{
{
name: "nil blocks is safe",
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROSidecars {
return nil
},
retStart: 0,
@@ -1179,7 +1175,7 @@ func TestCommitmentCountList(t *testing.T) {
}
}
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROSidecars, []blocks.ROBlob) {
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
for i := range blks {
@@ -1190,7 +1186,7 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
return bwb, blobs
}
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
func testReqFromResp(bwb []blocks.BlockWithROSidecars) *ethpb.BlobSidecarsByRangeRequest {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: bwb[0].Block.Block().Slot(),
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
@@ -1207,7 +1203,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
}
require.Equal(t, len(blobs), len(expectedCommits))
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
for _, bw := range bwb {
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
@@ -1228,7 +1224,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
})
t.Run("missing blobs", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
})
t.Run("no blobs for last block", func(t *testing.T) {
@@ -1240,7 +1236,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
blobs = blobs[0 : len(blobs)-len(cmts)]
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
bwb[lastIdx].Block = lastBlk
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
})
t.Run("blobs not copied if all locally available", func(t *testing.T) {
@@ -1254,7 +1250,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))
require.Equal(t, 0, len(bwb[i7].Blobs))
@@ -1302,3 +1298,203 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
}
assert.Equal(t, 2, len(receivedPeers))
}
func TestSortedSliceFromMap(t *testing.T) {
m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
expected := []uint64{1, 2, 3, 4}
actual := sortedSliceFromMap(m)
require.DeepSSZEqual(t, expected, actual)
}
func TestFetchSidecars(t *testing.T) {
ctx := t.Context()
t.Run("No blocks", func(t *testing.T) {
fetcher := new(blocksFetcher)
pid, err := fetcher.fetchSidecars(ctx, "", nil, []blocks.BlockWithROSidecars{})
assert.NoError(t, err)
assert.Equal(t, peer.ID(""), pid)
})
t.Run("Nominal", func(t *testing.T) {
beaconConfig := params.BeaconConfig()
numberOfColumns := beaconConfig.NumberOfColumns
samplesPerSlot := beaconConfig.SamplesPerSlot
// Define "now" to be one epoch after genesis time + retention period.
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
secondsPerSlot := beaconConfig.SecondsPerSlot
slotsPerEpoch := beaconConfig.SlotsPerEpoch
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
genesisValidatorRoot := [fieldparams.RootLength]byte{}
nower := func() time.Time { return now }
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
// Define a Deneb block with blobs outside the retention period.
denebBlock := util.NewBeaconBlockDeneb()
denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
signedDenebBlock, err := blocks.NewSignedBeaconBlock(denebBlock)
require.NoError(t, err)
roDenebBlock, err := blocks.NewROBlock(signedDenebBlock)
require.NoError(t, err)
// Define a Fulu block with blobs in the retention period.
fuluBlock := util.NewBeaconBlockFulu()
fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
require.NoError(t, err)
bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
require.NoError(t, err)
// Create and save data column sidecars for this fulu block in the database.
params := make([]util.DataColumnParam, 0, numberOfColumns)
for i := range numberOfColumns {
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
params = append(params, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)
// Save the data column sidecars to the storage.
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
// Create a blocks fetcher.
fetcher := &blocksFetcher{
clock: clock,
p2p: p2ptest.NewTestP2P(t),
dcs: dataColumnStorage,
}
// Fetch sidecars.
blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roDenebBlock},
{Block: roFuluBlock},
}
pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
require.NoError(t, err)
require.Equal(t, peer.ID(""), pid)
// Verify that the blocks with sidecars were modified correctly.
require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
// We don't check the content of the columns here. The extensive test is done
// in TestFetchDataColumnsSidecars.
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
})
}
func TestFirstFuluIndex(t *testing.T) {
bellatrix := util.NewBeaconBlockBellatrix()
signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)
require.NoError(t, err)
roBellatrix, err := blocks.NewROBlock(signedBellatrix)
require.NoError(t, err)
capella := util.NewBeaconBlockCapella()
signedCapella, err := blocks.NewSignedBeaconBlock(capella)
require.NoError(t, err)
roCapella, err := blocks.NewROBlock(signedCapella)
require.NoError(t, err)
deneb := util.NewBeaconBlockDeneb()
signedDeneb, err := blocks.NewSignedBeaconBlock(deneb)
require.NoError(t, err)
roDeneb, err := blocks.NewROBlock(signedDeneb)
require.NoError(t, err)
fulu := util.NewBeaconBlockFulu()
signedFulu, err := blocks.NewSignedBeaconBlock(fulu)
require.NoError(t, err)
roFulu, err := blocks.NewROBlock(signedFulu)
require.NoError(t, err)
tests := []struct {
name string
setupBlocks func(t *testing.T) []blocks.BlockWithROSidecars
expectedIndex int
expectError bool
}{
{
name: "all blocks are pre-Fulu",
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
return []blocks.BlockWithROSidecars{
{Block: roBellatrix},
{Block: roCapella},
{Block: roDeneb},
}
},
expectedIndex: 3, // Should be the length of the slice
expectError: false,
},
{
name: "all blocks are Fulu or later",
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
return []blocks.BlockWithROSidecars{
{Block: roFulu},
{Block: roFulu},
}
},
expectedIndex: 0,
expectError: false,
},
{
name: "mixed blocks correctly sorted",
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
return []blocks.BlockWithROSidecars{
{Block: roBellatrix},
{Block: roCapella},
{Block: roDeneb},
{Block: roFulu},
{Block: roFulu},
}
},
expectedIndex: 3, // Index where Fulu blocks start
expectError: false,
},
{
name: "mixed blocks incorrectly sorted",
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
return []blocks.BlockWithROSidecars{
{Block: roBellatrix},
{Block: roCapella},
{Block: roFulu},
{Block: roDeneb},
{Block: roFulu},
}
},
expectedIndex: 0,
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blocks := tt.setupBlocks(t)
index, err := findFirstFuluIndex(blocks)
if tt.expectError {
require.NotNil(t, err)
return
}
require.NoError(t, err)
require.Equal(t, tt.expectedIndex, index)
})
}
}


@@ -24,7 +24,7 @@ import (
type forkData struct {
blocksFrom peer.ID
blobsFrom peer.ID
bwb []blocks.BlockWithROBlobs
bwb []blocks.BlockWithROSidecars
}
// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.
@@ -275,16 +275,18 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
"slot": block.Block().Slot(),
"root": fmt.Sprintf("%#x", parentRoot),
}).Debug("Block with unknown parent root has been found")
altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
bwb, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
if err != nil {
return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
}
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
// the blocks.
bpid, bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
if err != nil {
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
return nil, errors.Wrap(err, "fetch sidecars")
}
// The caller will use the blocks with verified blobs in bwb as the starting point for
// round-robin syncing the alternate chain.
return &forkData{blocksFrom: pid, blobsFrom: bpid, bwb: bwb}, nil
@@ -303,10 +305,9 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
if err != nil {
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
}
var bpid peer.ID
bpid, bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
if err != nil {
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
return nil, errors.Wrap(err, "fetch sidecars")
}
return &forkData{
blocksFrom: pid,
@@ -350,9 +351,12 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p
cp := f.chain.FinalizedCheckpt()
headEpoch = cp.Epoch
targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
} else {
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
return headEpoch, targetEpoch, peers
}
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
return headEpoch, targetEpoch, peers
}
}

View File

@@ -371,13 +371,13 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
t.Run("slot is too early", func(t *testing.T) {
p2 := p2pt.NewTestP2P(t)
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 0)
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 0)
assert.ErrorContains(t, "slot is too low to backtrack", err)
})
t.Run("no peer status", func(t *testing.T) {
p2 := p2pt.NewTestP2P(t)
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
assert.ErrorContains(t, "cannot obtain peer's status", err)
})
@@ -391,7 +391,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
HeadRoot: nil,
HeadSlot: 0,
})
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
assert.ErrorContains(t, "cannot locate non-empty slot for a peer", err)
})
@@ -401,7 +401,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
defer func() {
assert.NoError(t, p1.Disconnect(p2))
}()
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
assert.ErrorContains(t, "no alternative blocks exist within scanned range", err)
})
@@ -413,7 +413,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
defer func() {
assert.NoError(t, p1.Disconnect(p2))
}()
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
require.NoError(t, err)
require.Equal(t, 10, len(fork.bwb))
assert.Equal(t, forkedSlot, fork.bwb[0].Block.Block().Slot(), "Expected slot %d to be ancestor", forkedSlot)
@@ -426,7 +426,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
defer func() {
assert.NoError(t, p1.Disconnect(p2))
}()
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
require.ErrorContains(t, "failed to find common ancestor", err)
})
@@ -438,7 +438,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
defer func() {
assert.NoError(t, p1.Disconnect(p2))
}()
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
require.NoError(t, err)
reqEnd := testForkStartSlot(t, 64) + primitives.Slot(findForkReqRangeSize())
@@ -512,7 +512,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
require.NoError(t, err)
_, err = fetcher.findAncestor(ctx, p2.PeerID(), wsb)
_, err = fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
assert.ErrorContains(t, "protocols not supported", err)
})
@@ -525,7 +525,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
require.NoError(t, err)
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), wsb)
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
assert.ErrorContains(t, "no common ancestor found", err)
assert.Equal(t, (*forkData)(nil), fork)
})

View File

@@ -72,6 +72,8 @@ type blocksQueueConfig struct {
db db.ReadOnlyDatabase
mode syncMode
bs filesystem.BlobStorageSummarizer
dcs filesystem.DataColumnStorageReader
cv verification.NewDataColumnsVerifier
}
// blocksQueue is a priority queue that serves as an intermediary between block fetchers (producers)
@@ -96,7 +98,7 @@ type blocksQueue struct {
type blocksQueueFetchedData struct {
blocksFrom peer.ID
blobsFrom peer.ID
bwb []blocks.BlockWithROBlobs
bwb []blocks.BlockWithROSidecars
}
// newBlocksQueue creates initialized priority queue.
@@ -115,6 +117,8 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
db: cfg.db,
clock: cfg.clock,
bs: cfg.bs,
dcs: cfg.dcs,
cv: cfg.cv,
})
}
highestExpectedSlot := cfg.highestExpectedSlot
@@ -479,4 +483,4 @@ func onCheckStaleEvent(ctx context.Context) eventHandlerFn {
return stateSkipped, nil
}
}
}

View File

@@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
highestExpectedSlot: tt.highestExpectedSlot,
})
assert.NoError(t, queue.start())
processBlock := func(b blocks.BlockWithROBlobs) error {
processBlock := func(b blocks.BlockWithROSidecars) error {
block := b.Block
if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
@@ -275,7 +275,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
return mc.ReceiveBlock(ctx, block, root, nil)
}
var blocks []blocks.BlockWithROBlobs
var blocks []blocks.BlockWithROSidecars
for data := range queue.fetchedData {
for _, b := range data.bwb {
if err := processBlock(b); err != nil {
@@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
require.NoError(t, err)
response := &fetchRequestResponse{
blocksFrom: "abc",
bwb: []blocks.BlockWithROBlobs{
bwb: []blocks.BlockWithROSidecars{
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
},
@@ -640,7 +640,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[256].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}
@@ -674,7 +674,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}
@@ -705,7 +705,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"sort"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -13,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
@@ -78,6 +80,8 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
highestExpectedSlot: highestSlot,
mode: mode,
bs: s.cfg.BlobStorage,
dcs: s.cfg.DataColumnStorage,
cv: s.newDataColumnsVerifier,
}
queue := newBlocksQueue(ctx, cfg)
if err := queue.start(); err != nil {
@@ -157,31 +161,82 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
return 0, err
}
if len(bwb) == 0 {
return 0, nil
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
"firstUnprocessed": bwb[0].Block.Block().Slot(),
// Separate blocks with blobs from blocks with data columns.
firstDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
return bwb[i].Block.Version() >= version.Fulu
})
blocksWithBlobs := bwb[:firstDataColumnIndex]
blocksWithDataColumns := bwb[firstDataColumnIndex:]
blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log
if len(blocksWithBlobs) > 0 {
logBlobs = logBlobs.WithField("firstUnprocessed", blocksWithBlobs[0].Block.Block().Slot())
}
for i, b := range bwb {
sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)
if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
for i, b := range blocksWithBlobs {
if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
return uint64(i), err
}
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
if errors.Is(err, errParentDoesNotExist) {
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
} else {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warn("Block processing failure")
}
return uint64(i), err
}
}
if len(blocksWithDataColumns) == 0 {
return uint64(len(bwb)), nil
}
// Save data column sidecars.
count := 0
for _, b := range blocksWithDataColumns {
count += len(b.Columns)
}
sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range blocksWithDataColumns {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}
if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return 0, errors.Wrap(err, "save data column sidecars")
}
for i, b := range blocksWithDataColumns {
logDataColumns := logDataColumns.WithFields(syncFields(b.Block))
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, nil); err != nil {
switch {
case errors.Is(err, errParentDoesNotExist):
logDataColumns.
WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
Debug("Could not process batch blocks due to missing parent")
return uint64(i), err
default:
logDataColumns.WithError(err).Warning("Block processing failure")
return uint64(i), err
}
}
}
return uint64(len(bwb)), nil
}
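Illustrative aside, not part of the diff: the sort.Search split above is only valid because the batch is sorted by slot, so every pre-Fulu block precedes every Fulu block and the predicate flips from false to true at most once. A self-contained toy with stand-in fork ordinals (not the real version constants):
package main

import (
	"fmt"
	"sort"
)

func main() {
	const deneb, fulu = 4, 6 // stand-in ordinals mirroring version.Deneb < version.Fulu
	batch := []int{deneb, deneb, deneb, fulu, fulu}

	// On a sorted batch the predicate flips exactly once, so sort.Search
	// returns the index of the first Fulu entry: 3 here.
	firstFulu := sort.Search(len(batch), func(i int) bool { return batch[i] >= fulu })
	fmt.Println(firstFulu) // prints 3
}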
@@ -193,12 +248,18 @@ func syncFields(b blocks.ROBlock) logrus.Fields {
}
// highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
// It returns `0` if no peers are connected.
// Note this can be lower than our finalized epoch if our connected peers are all behind us.
func (s *Service) highestFinalizedEpoch() primitives.Epoch {
highest := primitives.Epoch(0)
for _, pid := range s.cfg.P2P.Peers().Connected() {
peerChainState, err := s.cfg.P2P.Peers().ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {
if err != nil || peerChainState == nil {
continue
}
if peerChainState.FinalizedEpoch > highest {
highest = peerChainState.FinalizedEpoch
}
}
@@ -250,7 +311,7 @@ func (s *Service) logBatchSyncStatus(firstBlk blocks.ROBlock, nBlocks int) {
func (s *Service) processBlock(
ctx context.Context,
genesis time.Time,
bwb blocks.BlockWithROBlobs,
bwb blocks.BlockWithROSidecars,
blockReceiver blockReceiverFn,
avs das.AvailabilityStore,
) error {
@@ -269,7 +330,7 @@ func (s *Service) processBlock(
type processedChecker func(context.Context, blocks.ROBlock) bool
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROSidecars, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROSidecars, error) {
// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
var processed *int
for i := range bwb {
@@ -299,43 +360,100 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSl
return bwb[nonProcessedIdx:], nil
}
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) (uint64, error) {
if len(bwb) == 0 {
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn) (uint64, error) {
bwbCount := uint64(len(bwb))
if bwbCount == 0 {
return 0, errors.New("0 blocks provided into method")
}
headSlot := s.cfg.Chain.HeadSlot()
var err error
bwb, err = validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
bwb, err := validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
if err != nil {
return 0, err
}
if len(bwb) == 0 {
return 0, nil
}
first := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, first.Block().ParentRoot()) {
firstBlock := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, firstBlock.Block().ParentRoot()) {
return 0, fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
errParentDoesNotExist, firstBlock.Block().ParentRoot(), firstBlock.Block().Slot())
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(first, len(bwb))
for _, bb := range bwb {
if len(bb.Blobs) == 0 {
firstFuluIndex, err := findFirstFuluIndex(bwb)
if err != nil {
return 0, errors.Wrap(err, "finding first Fulu index")
}
blocksWithBlobs := bwb[:firstFuluIndex]
blocksWithDataColumns := bwb[firstFuluIndex:]
if err := s.processBlocksWithBlobs(ctx, blocksWithBlobs, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with blobs")
}
if err := s.processBlocksWithDataColumns(ctx, blocksWithDataColumns, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with data columns")
}
return bwbCount, nil
}
func (s *Service) processBlocksWithBlobs(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}
batchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
persistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, batchVerifier)
s.logBatchSyncStatus(firstBlock, bwbCount)
for _, bwb := range bwbs {
if len(bwb.Blobs) == 0 {
continue
}
sidecars := blocks.NewSidecarsFromBlobSidecars(bb.Blobs)
if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
return 0, err
if err := persistentStore.Persist(s.clock.CurrentSlot(), bwb.Blobs...); err != nil {
return errors.Wrap(err, "persisting blobs")
}
}
robs := blocks.BlockWithROBlobsSlice(bwb).ROBlocks()
return uint64(len(bwb)), bFunc(ctx, robs, avs)
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, persistentStore); err != nil {
return errors.Wrap(err, "processing blocks with blobs")
}
return nil
}
func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}
s.logBatchSyncStatus(firstBlock, bwbCount)
// Save data column sidecars.
count := 0
for _, bwb := range bwbs {
count += len(bwb.Columns)
}
sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range bwbs {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}
if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, nil); err != nil {
return errors.Wrap(err, "process post-Fulu blocks")
}
return nil
}
func isPunishableError(err error) bool {
@@ -380,4 +498,4 @@ func (s *Service) isProcessedBlock(ctx context.Context, blk blocks.ROBlock) bool
func (s *Service) downscorePeer(peerID peer.ID, reason string) {
newScore := s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
}

View File

@@ -8,9 +8,11 @@ import (
"github.com/OffchainLabs/prysm/v6/async/abool"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -308,7 +310,7 @@ func TestService_roundRobinSync(t *testing.T) {
} // no-op mock
clock := startup.NewClock(gt, vr)
s := &Service{
ctx: t.Context(),
ctx: context.Background(),
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
synced: abool.New(),
chainStarted: abool.NewBool(true),
@@ -373,7 +375,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
@@ -385,7 +387,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
return nil
}, nil)
@@ -396,7 +398,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
@@ -432,7 +434,7 @@ func TestService_processBlockBatch(t *testing.T) {
s.genesisTime = genesis
t.Run("process non-linear batch", func(t *testing.T) {
var batch []blocks.BlockWithROBlobs
var batch []blocks.BlockWithROSidecars
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -446,11 +448,11 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}
var batch2 []blocks.BlockWithROBlobs
var batch2 []blocks.BlockWithROSidecars
for i := primitives.Slot(10); i < 20; i++ {
parentRoot := currBlockRoot
blk1 := util.NewBeaconBlock()
@@ -463,7 +465,7 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
batch2 = append(batch2, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}
@@ -485,7 +487,7 @@ func TestService_processBlockBatch(t *testing.T) {
assert.ErrorContains(t, "block is already processed", err)
require.Equal(t, uint64(0), count)
var badBatch2 []blocks.BlockWithROBlobs
var badBatch2 []blocks.BlockWithROSidecars
for i, b := range batch2 {
// create a non-linear batch
if i%3 == 0 && i != 0 {
@@ -568,7 +570,7 @@ func TestService_blockProviderScoring(t *testing.T) {
} // no-op mock
clock := startup.NewClock(gt, vr)
s := &Service{
ctx: t.Context(),
ctx: context.Background(),
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
synced: abool.New(),
chainStarted: abool.NewBool(true),
@@ -637,7 +639,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) {
ValidatorsRoot: vr,
}
s := &Service{
ctx: t.Context(),
ctx: context.Background(),
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
synced: abool.New(),
chainStarted: abool.NewBool(true),
@@ -685,7 +687,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
util.SaveBlock(t, t.Context(), beaconDB, genesisBlk)
var batch []blocks.BlockWithROBlobs
var batch []blocks.BlockWithROSidecars
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -699,7 +701,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}
@@ -712,3 +714,155 @@ func TestService_ValidUnprocessed(t *testing.T) {
// Ensure that the unprocessed batch is returned correctly.
assert.Equal(t, len(retBlocks), len(batch)-2)
}
func TestService_ProcessFetchedDataRegSync(t *testing.T) {
ctx := t.Context()
// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)
// Create Fulu blocks.
fuluBlock1 := util.NewBeaconBlockFulu()
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
require.NoError(t, err)
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
require.NoError(t, err)
block1Root := roFuluBlock1.Root()
fuluBlock2 := util.NewBeaconBlockFulu()
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
fuluBlock2.Block.Slot = 1
fuluBlock2.Block.ParentRoot = block1Root[:]
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
require.NoError(t, err)
roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
require.NoError(t, err)
block2Root := roFuluBlock2.Root()
parentRoot2 := roFuluBlock2.Block().ParentRoot()
bodyRoot2, err := roFuluBlock2.Block().Body().HashTreeRoot()
require.NoError(t, err)
// Create a mock chain service.
const validatorCount = uint64(64)
state, _ := util.DeterministicGenesisState(t, validatorCount)
chain := &mock.ChainService{
FinalizedCheckPoint: &eth.Checkpoint{},
DB: dbtest.SetupDB(t),
State: state,
Root: block1Root[:],
}
// Create a new service instance.
service := &Service{
cfg: &Config{
Chain: chain,
DataColumnStorage: dataColumnStorage,
},
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
}
// Save the parent block in the database.
err = chain.DB.SaveBlock(ctx, roFuluBlock1)
require.NoError(t, err)
// Create data column sidecars.
const count = uint64(3)
params := make([]util.DataColumnParam, 0, count)
for i := range count {
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot2[:], ParentRoot: parentRoot2[:], Slot: roFuluBlock2.Block().Slot()}
params = append(params, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roFuluBlock2, Columns: verifiedRoDataColumnSidecars},
}
data := &blocksQueueFetchedData{
bwb: blocksWithSidecars,
}
actual, err := service.processFetchedDataRegSync(ctx, data)
require.NoError(t, err)
require.Equal(t, uint64(1), actual)
// Check block and data column sidecars were saved correctly.
require.Equal(t, true, chain.DB.HasBlock(ctx, block2Root))
summary := dataColumnStorage.Summary(block2Root)
for i := range count {
require.Equal(t, true, summary.HasIndex(i))
}
}
func TestService_processBlocksWithDataColumns(t *testing.T) {
ctx := t.Context()
t.Run("no blocks", func(t *testing.T) {
fuluBlock := util.NewBeaconBlockFulu()
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
require.NoError(t, err)
service := new(Service)
err = service.processBlocksWithDataColumns(ctx, nil, nil, roFuluBlock)
require.NoError(t, err)
})
t.Run("nominal", func(t *testing.T) {
fuluBlock := util.NewBeaconBlockFulu()
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
require.NoError(t, err)
bodyRoot, err := roFuluBlock.Block().Body().HashTreeRoot()
require.NoError(t, err)
// Create data column sidecars.
const count = uint64(3)
params := make([]util.DataColumnParam, 0, count)
for i := range count {
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot[:]}
params = append(params, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roFuluBlock, Columns: verifiedRoDataColumnSidecars},
}
// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)
// Create a service.
service := &Service{
cfg: &Config{
P2P: p2pt.NewTestP2P(t),
DataColumnStorage: dataColumnStorage,
},
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
}
receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
require.Equal(t, 1, len(blks))
return nil
}
err = service.processBlocksWithDataColumns(ctx, blocksWithSidecars, receiverFunc, roFuluBlock)
require.NoError(t, err)
// Verify that the data columns were saved correctly.
summary := dataColumnStorage.Summary(roFuluBlock.Root())
for i := range count {
require.Equal(t, true, summary.HasIndex(i))
}
})
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
@@ -53,22 +54,24 @@ type Config struct {
ClockWaiter startup.ClockWaiter
InitialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
}
// Service service.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
ctxMap sync.ContextByteVersions
genesisTime time.Time
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newDataColumnsVerifier verification.NewDataColumnsVerifier
ctxMap sync.ContextByteVersions
genesisTime time.Time
}
// Option is a functional option for the initial-sync Service.
@@ -149,6 +152,7 @@ func (s *Service) Start() {
return
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
gt := clock.GenesisTime()
if gt.IsZero() {
@@ -175,19 +179,22 @@ func (s *Service) Start() {
}
s.chainStarted.Set()
log.Info("Starting initial chain sync...")
// Are we already in sync, or close to it?
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
log.Info("Already synced to the current chain head")
s.markSynced()
return
}
peers, err := s.waitForMinimumPeers()
if err != nil {
log.WithError(err).Error("Error waiting for minimum number of peers")
return
}
if err := s.fetchOriginBlobs(peers); err != nil {
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
if err := s.fetchOriginSidecars(peers); err != nil {
log.WithError(err).Error("Error fetching origin sidecars")
return
}
if err := s.roundRobinSync(); err != nil {
@@ -200,6 +207,48 @@ func (s *Service) Start() {
s.markSynced()
}
// fetchOriginSidecars fetches the missing sidecars for the checkpoint sync origin block: blob sidecars for pre-Fulu blocks, data column sidecars from Fulu onward.
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}
if err != nil {
return errors.Wrap(err, "origin checkpoint block root")
}
block, err := s.cfg.DB.Block(s.ctx, blockRoot)
if err != nil {
return errors.Wrap(err, "block")
}
currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return errors.Wrap(err, "new ro block with root")
}
blockVersion := roBlock.Version()
if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
}
if blockVersion >= version.Deneb {
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin blobs")
}
}
return nil
}
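One detail worth calling out: because version.Fulu is greater than version.Deneb, a Fulu block also satisfies blockVersion >= version.Deneb, so the Fulu branch must be checked first. A stand-alone illustration of the dispatch order (stand-in ordinals, not the Prysm constants):
package main

import "fmt"

func main() {
	const deneb, fulu = 4, 6 // stand-in ordinals mirroring version.Deneb < version.Fulu

	for _, blockVersion := range []int{3, deneb, fulu} {
		switch {
		case blockVersion >= fulu:
			fmt.Println(blockVersion, "-> fetch origin data columns")
		case blockVersion >= deneb:
			fmt.Println(blockVersion, "-> fetch origin blobs")
		default:
			fmt.Println(blockVersion, "-> nothing to fetch, pre-Deneb block")
		}
	}
}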
// Stop initial sync.
func (s *Service) Stop() error {
s.cancel()
@@ -304,23 +353,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
return req, nil
}
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}
blk, err := s.cfg.DB.Block(s.ctx, r)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
return err
}
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
return nil
}
rob, err := blocks.NewROBlockWithRoot(blk, r)
if err != nil {
return err
}
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
r := rob.Root()
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
if err != nil {
return err
@@ -335,16 +370,17 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
if err != nil {
continue
}
if len(blobSidecars) != len(req) {
continue
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
if err := avs.Persist(current, sidecars...); err != nil {
if err := avs.Persist(current, blobSidecars...); err != nil {
return err
}
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
continue
@@ -355,6 +391,67 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early if the origin block has no blob commitments.
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "fetch blob commitments")
}
if len(commitments) == 0 {
return nil
}
// Compute the columns to request.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}
samplingSize := max(custodyGroupCount, samplesPerSlot)
info, _, err := peerdas.Info(s.cfg.P2P.NodeID(), samplingSize)
if err != nil {
return errors.Wrap(err, "fetch peer info")
}
// Fetch origin data column sidecars.
root := roBlock.Root()
params := sync.DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
}
verifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
}
// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
}
if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data columns for checkpoint sync block")
return nil
}
func shufflePeers(pids []peer.ID) {
rg := rand.NewGenerator()
rg.Shuffle(len(pids), func(i, j int) {
@@ -367,3 +464,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
return ini.NewBlobVerifier(b, reqs)
}
}
func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
return ini.NewDataColumnsVerifier(roDataColumns, reqs)
}
}

View File

@@ -7,14 +7,17 @@ import (
"time"
"github.com/OffchainLabs/prysm/v6/async/abool"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -138,7 +141,7 @@ func TestService_InitStartStop(t *testing.T) {
},
}
p := p2pt.NewTestP2P(t)
p := p2ptest.NewTestP2P(t)
connectPeers(t, p, []*peerData{}, p.Peers())
for i, tt := range tests {
if i == 0 {
@@ -328,7 +331,7 @@ func TestService_markSynced(t *testing.T) {
}
func TestService_Resync(t *testing.T) {
p := p2pt.NewTestP2P(t)
p := p2ptest.NewTestP2P(t)
connectPeers(t, p, []*peerData{
{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 160},
}, p.Peers())
@@ -511,5 +514,152 @@ func TestOriginOutsideRetention(t *testing.T) {
require.NoError(t, concreteDB.SaveOriginCheckpointBlockRoot(ctx, blk.Root()))
// This would break due to missing service dependencies, but will return nil fast due to being outside retention.
require.Equal(t, false, params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(clock.CurrentSlot())))
require.NoError(t, s.fetchOriginBlobs([]peer.ID{}))
require.NoError(t, s.fetchOriginSidecars([]peer.ID{}))
}
func TestFetchOriginSidecars(t *testing.T) {
ctx := t.Context()
beaconConfig := params.BeaconConfig()
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
secondsPerSlot := beaconConfig.SecondsPerSlot
slotsPerEpoch := beaconConfig.SlotsPerEpoch
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
genesisValidatorRoot := [fieldparams.RootLength]byte{}
t.Run("out of retention period", func(t *testing.T) {
// Create an origin block.
block := util.NewBeaconBlockFulu()
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBlock)
require.NoError(t, err)
// Save the block.
db := dbtest.SetupDB(t)
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
require.NoError(t, err)
err = db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Define "now" to be one epoch after genesis time + retention period.
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
nower := func() time.Time { return now }
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
service := &Service{
cfg: &Config{
DB: db,
},
clock: clock,
}
err = service.fetchOriginSidecars(nil)
require.NoError(t, err)
})
t.Run("no commitments", func(t *testing.T) {
// Create an origin block.
block := util.NewBeaconBlockFulu()
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBlock)
require.NoError(t, err)
// Save the block.
db := dbtest.SetupDB(t)
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
require.NoError(t, err)
err = db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Define "now" to be after genesis time + retention period.
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
nower := func() time.Time { return now }
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
service := &Service{
cfg: &Config{
DB: db,
P2P: p2ptest.NewTestP2P(t),
},
clock: clock,
}
err = service.fetchOriginSidecars(nil)
require.NoError(t, err)
})
t.Run("nominal", func(t *testing.T) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Create block and sidecars.
const blobCount = 1
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
// Save the block.
db := dbtest.SetupDB(t)
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
require.NoError(t, err)
err = db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)
// Compute the columns to request.
p2p := p2ptest.NewTestP2P(t)
custodyGroupCount, err := p2p.CustodyGroupCount()
require.NoError(t, err)
samplingSize := max(custodyGroupCount, samplesPerSlot)
info, _, err := peerdas.Info(p2p.NodeID(), samplingSize)
require.NoError(t, err)
// Save all sidecars except the ones we need, so that they still have to be fetched.
toSave := make([]blocks.VerifiedRODataColumn, 0, uint64(len(verifiedRoSidecars))-samplingSize)
for _, sidecar := range verifiedRoSidecars {
if !info.CustodyColumns[sidecar.Index] {
toSave = append(toSave, sidecar)
}
}
err = dataColumnStorage.Save(toSave)
require.NoError(t, err)
// Define "now" to be after genesis time + retention period.
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
nower := func() time.Time { return now }
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
service := &Service{
cfg: &Config{
DB: db,
P2P: p2p,
DataColumnStorage: dataColumnStorage,
},
clock: clock,
}
err = service.fetchOriginSidecars(nil)
require.NoError(t, err)
// Check that needed sidecars are saved.
summary := dataColumnStorage.Summary(roBlock.Root())
for index := range info.CustodyColumns {
require.Equal(t, true, summary.HasIndex(index))
}
})
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/async"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -175,8 +176,9 @@ func (s *Service) getBlocksInQueue(slot primitives.Slot) []interfaces.ReadOnlySi
func (s *Service) removeBlockFromQueue(b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
s.pendingQueueLock.Lock()
defer s.pendingQueueLock.Unlock()
if err := s.deleteBlockFromPendingQueue(b.Block().Slot(), b, blkRoot); err != nil {
return err
return errors.Wrap(err, "delete block from pending queue")
}
return nil
}
@@ -196,41 +198,82 @@ func (s *Service) hasPeer() bool {
var errNoPeersForPending = errors.New("no suitable peers to process pending block queue, delaying")
// processAndBroadcastBlock validates, processes, and broadcasts a block.
// part of the function is to request missing blobs from peers if the block contains kzg commitments.
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
// Part of the function is to request missing sidecars from peers if the block contains kzg commitments.
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
if err := s.processBlock(ctx, b, blkRoot); err != nil {
return errors.Wrap(err, "process block")
}
if err := s.receiveAndBroadCastBlock(ctx, b, blkRoot, b.Block().Slot()); err != nil {
return errors.Wrap(err, "receive and broadcast block")
}
return nil
}
func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
blockSlot := b.Block().Slot()
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
if !errors.Is(ErrOptimisticParent, err) {
log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block")
log.WithError(err).WithField("slot", blockSlot).Debug("Could not validate block")
return err
}
}
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
blockEpoch, denebForkEpoch, fuluForkEpoch := slots.ToEpoch(blockSlot), params.BeaconConfig().DenebForkEpoch, params.BeaconConfig().FuluForkEpoch
roBlock, err := blocks.NewROBlockWithRoot(b, blkRoot)
if err != nil {
return err
}
if len(request) > 0 {
peers := s.getBestPeers()
peerCount := len(peers)
if peerCount == 0 {
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
}
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
return err
}
return errors.Wrap(err, "new ro block with root")
}
if blockEpoch >= fuluForkEpoch {
if err := s.requestAndSaveMissingDataColumnSidecars([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing data column sidecars")
}
return nil
}
if blockEpoch >= denebForkEpoch {
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
if err != nil {
return errors.Wrap(err, "pending blobs request for block")
}
if len(request) > 0 {
peers := s.getBestPeers()
peerCount := len(peers)
if peerCount == 0 {
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
}
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
return errors.Wrap(err, "send and save blob sidecars")
}
}
return nil
}
return nil
}
func (s *Service) receiveAndBroadCastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte, blockSlot primitives.Slot) error {
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil {
return err
return errors.Wrap(err, "receive block")
}
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
s.setSeenBlockIndexSlot(blockSlot, b.Block().ProposerIndex())
pb, err := b.Proto()
if err != nil {
log.WithError(err).Debug("Could not get protobuf block")
return err
}
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
log.WithError(err).Debug("Could not broadcast block")
return err
@@ -286,55 +329,105 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
ctx, span := prysmTrace.StartSpan(ctx, "sendBatchRootRequest")
defer span.End()
roots = dedupRoots(roots)
s.pendingQueueLock.RLock()
for i := len(roots) - 1; i >= 0; i-- {
r := roots[i]
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
roots = append(roots[:i], roots[i+1:]...)
} else {
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
}
}
s.pendingQueueLock.RUnlock()
// Exit early if there are no roots to request.
if len(roots) == 0 {
return nil
}
bestPeers := s.getBestPeers()
if len(bestPeers) == 0 {
// Remove duplicates (if any) from the list of roots.
roots = dedupRoots(roots)
// Filter out, in place, roots that are already seen in pending blocks or that are being synced.
func() {
s.pendingQueueLock.RLock()
defer s.pendingQueueLock.RUnlock()
for i := len(roots) - 1; i >= 0; i-- {
r := roots[i]
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
roots = append(roots[:i], roots[i+1:]...)
continue
}
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
}
}()
// Nothing to do, exit early.
if len(roots) == 0 {
return nil
}
// Randomly choose a peer to query from our best peers. If that peer cannot return
// all the requested blocks, we randomly select another peer.
pid := bestPeers[randGen.Int()%len(bestPeers)]
for i := 0; i < numOfTries; i++ {
// Fetch best peers to request blocks from.
bestPeers := s.getBestPeers()
// No suitable peer, exit early.
if len(bestPeers) == 0 {
log.WithField("roots", fmt.Sprintf("%#x", roots)).Debug("Send batch root request: No suitable peers")
return nil
}
// Randomly choose a peer to query from our best peers.
// If that peer cannot return all the requested blocks,
// we randomly select another peer.
randomIndex := randGen.Int() % len(bestPeers)
pid := bestPeers[randomIndex]
for range numOfTries {
req := p2ptypes.BeaconBlockByRootsReq(roots)
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
// Get the current epoch.
currentSlot := s.cfg.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
// Trim the request to the maximum number of blocks we can request if needed.
maxReqBlock := params.MaxRequestBlock(currentEpoch)
if uint64(len(roots)) > maxReqBlock {
rootCount := uint64(len(roots))
if rootCount > maxReqBlock {
req = roots[:maxReqBlock]
}
// Send the request to the peer.
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Debug("Could not send recent block request")
}
newRoots := make([][32]byte, 0, len(roots))
s.pendingQueueLock.RLock()
for _, rt := range roots {
if !s.seenPendingBlocks[rt] {
newRoots = append(newRoots, rt)
// Filter out roots that are already seen in pending blocks.
newRoots := make([][32]byte, 0, rootCount)
func() {
s.pendingQueueLock.RLock()
defer s.pendingQueueLock.RUnlock()
for _, rt := range roots {
if !s.seenPendingBlocks[rt] {
newRoots = append(newRoots, rt)
}
}
}
s.pendingQueueLock.RUnlock()
}()
// Exit early if all roots have been seen.
// This is the happy path.
if len(newRoots) == 0 {
break
return nil
}
// Choosing a new peer with the leftover set of
// roots to request.
// There are still some roots that have not been seen.
// Choose a new peer with the leftover set of roots to request.
roots = newRoots
pid = bestPeers[randGen.Int()%len(bestPeers)]
// Choose a new peer to query.
randomIndex = randGen.Int() % len(bestPeers)
pid = bestPeers[randomIndex]
}
// Some roots are still missing after all allowed tries.
// This is the unhappy path.
log.WithFields(logrus.Fields{
"roots": fmt.Sprintf("%#x", roots),
"tries": numOfTries,
}).Debug("Send batch root request: Some roots are still missing after all allowed tries")
return nil
}
@@ -535,4 +628,4 @@ func dedupRoots(roots [][32]byte) [][32]byte {
newRoots = append(newRoots, roots[i])
}
return newRoots
}
}

View File

@@ -61,48 +61,49 @@ func TestRateLimiter_ExceedCapacity(t *testing.T) {
}
}
func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
p1 := mockp2p.NewTestP2P(t)
p2 := mockp2p.NewTestP2P(t)
p1.Connect(p2)
p1.Peers().Add(nil, p2.PeerID(), p2.BHost.Addrs()[0], network.DirOutbound)
// TODO: Uncomment when out of devnet
// func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
// p1 := mockp2p.NewTestP2P(t)
// p2 := mockp2p.NewTestP2P(t)
// p1.Connect(p2)
// p1.Peers().Add(nil, p2.PeerID(), p2.BHost.Addrs()[0], network.DirOutbound)
rlimiter := newRateLimiter(p1)
// rlimiter := newRateLimiter(p1)
// BlockByRange
topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()
// // BlockByRange
// topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()
wg := sync.WaitGroup{}
p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
defer wg.Done()
code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
require.NoError(t, err, "could not read incoming stream")
assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
})
wg.Add(1)
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic))
require.NoError(t, err, "could not create stream")
// wg := sync.WaitGroup{}
// p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
// defer wg.Done()
// code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
// require.NoError(t, err, "could not read incoming stream")
// assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
// assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
// })
// wg.Add(1)
// stream, err := p1.BHost.NewStream(context.Background(), p2.PeerID(), protocol.ID(topic))
// require.NoError(t, err, "could not create stream")
for i := 0; i < 2*defaultBurstLimit; i++ {
err = rlimiter.validateRawRpcRequest(stream, 1)
rlimiter.addRawStream(stream)
require.NoError(t, err, "could not validate incoming request")
}
// Triggers rate limit error on burst.
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
// for i := 0; i < 2*defaultBurstLimit; i++ {
// err = rlimiter.validateRawRpcRequest(stream, 1)
// rlimiter.addRawStream(stream)
// require.NoError(t, err, "could not validate incoming request")
// }
// // Triggers rate limit error on burst.
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
// Make Peer bad.
for i := 0; i < defaultBurstLimit; i++ {
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
}
assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
require.NoError(t, stream.Close(), "could not close stream")
// // Make Peer bad.
// for i := 0; i < defaultBurstLimit; i++ {
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
// }
// assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
// require.NoError(t, stream.Close(), "could not close stream")
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
}
// if util.WaitTimeout(&wg, 1*time.Second) {
// t.Fatal("Did not receive stream within 1 sec")
// }
// }
func Test_limiter_retrieveCollector_requiresLock(t *testing.T) {
l := limiter{}

View File

@@ -411,150 +411,151 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
}
}
func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
d := db.SetupDB(t)
saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
// Populate the database with blocks that would match the request.
var parentRoot [32]byte
// Default to 1 to be inline with the spec.
req.Step = 1
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) {
block := util.NewBeaconBlock()
block.Block.Slot = i
if req.Step == 1 {
block.Block.ParentRoot = parentRoot[:]
}
util.SaveBlock(t, t.Context(), d, block)
rt, err := block.Block.HashTreeRoot()
require.NoError(t, err)
parentRoot = rt
}
}
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
req *ethpb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
reqAnswered := false
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer func() {
reqAnswered = true
}()
if !validateBlocks {
return
}
for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += primitives.Slot(req.Step) {
if !success {
continue
}
expectSuccess(t, stream)
res := util.NewBeaconBlock()
assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
}
}
})
stream, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl)
require.NoError(t, err)
if err := r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream); err != nil {
return err
}
time.Sleep(100 * time.Millisecond)
assert.Equal(t, reqAnswered, true)
return nil
}
// TODO: Uncomment when out of devnet
// func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
// d := db.SetupDB(t)
// saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
// // Populate the database with blocks that would match the request.
// var parentRoot [32]byte
// // Default to 1 to be inline with the spec.
// req.Step = 1
// for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) {
// block := util.NewBeaconBlock()
// block.Block.Slot = i
// if req.Step == 1 {
// block.Block.ParentRoot = parentRoot[:]
// }
// util.SaveBlock(t, context.Background(), d, block)
// rt, err := block.Block.HashTreeRoot()
// require.NoError(t, err)
// parentRoot = rt
// }
// }
// sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
// req *ethpb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
// reqAnswered := false
// p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
// defer func() {
// reqAnswered = true
// }()
// if !validateBlocks {
// return
// }
// for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += primitives.Slot(req.Step) {
// if !success {
// continue
// }
// expectSuccess(t, stream)
// res := util.NewBeaconBlock()
// assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
// if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
// t.Errorf("Received unexpected block slot %d", res.Block.Slot)
// }
// }
// })
// stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
// require.NoError(t, err)
// if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
// return err
// }
// time.Sleep(100 * time.Millisecond)
// assert.Equal(t, reqAnswered, true)
// return nil
// }
t.Run("high request count param and no overflow", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
// t.Run("high request count param and no overflow", func(t *testing.T) {
// p1 := p2ptest.NewTestP2P(t)
// p2 := p2ptest.NewTestP2P(t)
// p1.Connect(p2)
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot()))
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot()))
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
defaultBlockBurstFactor := 2 // TODO: can we update the default value set in TestMain to match flags?
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit*defaultBlockBurstFactor), time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Count: reqSize,
}
saveBlocks(req)
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
// topic := string(pcl)
// defaultBlockBurstFactor := 2 // TODO: can we update the default value set in TestMain to match flags?
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit*defaultBlockBurstFactor), time.Second, false)
// req := &ethpb.BeaconBlocksByRangeRequest{
// StartSlot: 100,
// Count: reqSize,
// }
// saveBlocks(req)
// This doesn't error because reqSize by default is 128, which is exactly the burst factor * batch limit
assert.NoError(t, sendRequest(p1, p2, r, req, true, true))
// // This doesn't error because reqSize by default is 128, which is exactly the burst factor * batch limit
// assert.NoError(t, sendRequest(p1, p2, r, req, true, true))
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
// expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
// })
t.Run("high request count param and overflow", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
// t.Run("high request count param and overflow", func(t *testing.T) {
// p1 := p2ptest.NewTestP2P(t)
// p2 := p2ptest.NewTestP2P(t)
// p1.Connect(p2)
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot())) - 1
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot())) - 1
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit), time.Second, false)
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
// topic := string(pcl)
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit), time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Count: reqSize,
}
saveBlocks(req)
// req := &ethpb.BeaconBlocksByRangeRequest{
// StartSlot: 100,
// Count: reqSize,
// }
// saveBlocks(req)
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
err := sendRequest(p1, p2, r, req, false, true)
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
}
// for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
// err := sendRequest(p1, p2, r, req, false, true)
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
// }
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
// expectedCapacity := int64(0) // Whole capacity is used.
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
// })
t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
// t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
// p1 := p2ptest.NewTestP2P(t)
// p2 := p2ptest.NewTestP2P(t)
// p1.Connect(p2)
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
// capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
// topic := string(pcl)
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Count: uint64(flags.Get().BlockBatchLimit),
}
saveBlocks(req)
// req := &ethpb.BeaconBlocksByRangeRequest{
// StartSlot: 100,
// Count: uint64(flags.Get().BlockBatchLimit),
// }
// saveBlocks(req)
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
}
// for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
// assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
// }
// One more request should result in overflow.
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
err := sendRequest(p1, p2, r, req, false, false)
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
}
// // One more request should result in overflow.
// for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
// err := sendRequest(p1, p2, r, req, false, false)
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
// }
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
}
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
// expectedCapacity := int64(0) // Whole capacity is used.
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
// })
// }
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
slotsSinceGenesis := primitives.Slot(1000)

View File

@@ -4,11 +4,13 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -20,15 +22,19 @@ import (
"github.com/pkg/errors"
)
// sendBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
// those corresponding blocks from that peer.
// sendBeaconBlocksRequest sends the `requests` beacon blocks by root requests to
// the peer with the given `id`. Each received block is inserted into the
// pending queue. Then, for each received block, it checks whether all corresponding sidecars
// are stored and, if not, sends the corresponding sidecar requests and stores the received sidecars.
// Only blob sidecars are requested from the peer with the given `id`;
// requests for other sidecar types are sent to the best available peers.
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()
requestedRoots := make(map[[32]byte]struct{})
requestedRoots := make(map[[fieldparams.RootLength]byte]bool)
for _, root := range *requests {
requestedRoots[root] = struct{}{}
requestedRoots[root] = true
}
blks, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, requests, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
@@ -36,39 +42,124 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
if err != nil {
return err
}
if _, ok := requestedRoots[blkRoot]; !ok {
if ok := requestedRoots[blkRoot]; !ok {
return fmt.Errorf("received unexpected block with root %x", blkRoot)
}
s.pendingQueueLock.Lock()
defer s.pendingQueueLock.Unlock()
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
return err
return errors.Wrapf(err, "insert block to pending queue for block with root %x", blkRoot)
}
return nil
})
// The following part deals with sidecars.
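// Blocks are split by fork: Fulu and later blocks need data column sidecars (fetched in a single batch below),
// Deneb through Electra blocks need blob sidecars requested from this peer, and older blocks need nothing.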
postFuluBlocks := make([]blocks.ROBlock, 0, len(blks))
for _, blk := range blks {
// Skip blocks before deneb because they have no blob.
if blk.Version() < version.Deneb {
blockVersion := blk.Version()
if blockVersion >= version.Fulu {
roBlock, err := blocks.NewROBlock(blk)
if err != nil {
return errors.Wrap(err, "new ro block")
}
postFuluBlocks = append(postFuluBlocks, roBlock)
continue
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return err
}
request, err := s.pendingBlobsRequestForBlock(blkRoot, blk)
if err != nil {
return err
}
if len(request) == 0 {
if blockVersion >= version.Deneb {
if err := s.requestAndSaveMissingBlobSidecars(blk, id); err != nil {
return errors.Wrap(err, "request and save missing blob sidecars")
}
continue
}
if err := s.sendAndSaveBlobSidecars(ctx, request, id, blk); err != nil {
return err
}
}
if err := s.requestAndSaveMissingDataColumnSidecars(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing data columns")
}
return err
}
// requestAndSaveMissingDataColumnSidecars checks whether data column sidecars are missing for the given blocks.
// If so, it requests them and saves them to storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "fetch custody group count from peer")
}
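// The effective sampling size is at least SamplesPerSlot groups, even if the node custodies fewer.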
samplingSize := max(custodyGroupCount, samplesPerSlot)
info, _, err := peerdas.Info(s.cfg.p2p.NodeID(), samplingSize)
if err != nil {
return errors.Wrap(err, "custody info")
}
// Fetch missing data column sidecars.
params := DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.cfg.clock,
P2P: s.cfg.p2p,
CtxMap: s.ctxMap,
Storage: s.cfg.dataColumnStorage,
NewVerifier: s.newColumnsVerifier,
}
sidecarsByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
}
// Save the sidecars to the storage.
count := 0
for _, sidecars := range sidecarsByRoot {
count += len(sidecars)
}
sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, sidecars := range sidecarsByRoot {
sidecarsToSave = append(sidecarsToSave, sidecars...)
}
if err := s.cfg.dataColumnStorage.Save(sidecarsToSave); err != nil {
return errors.Wrap(err, "save")
}
return nil
}
func (s *Service) requestAndSaveMissingBlobSidecars(block interfaces.ReadOnlySignedBeaconBlock, peerID peer.ID) error {
blockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "hash tree root")
}
request, err := s.pendingBlobsRequestForBlock(blockRoot, block)
if err != nil {
return errors.Wrap(err, "pending blobs request for block")
}
if len(request) == 0 {
return nil
}
if err := s.sendAndSaveBlobSidecars(s.ctx, request, peerID, block); err != nil {
return errors.Wrap(err, "send and save blob sidecars")
}
return nil
}
// beaconBlocksRootRPCHandler looks up the requested blocks in the database using the given block roots.
func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
@@ -207,4 +298,4 @@ func requestsForMissingIndices(stored filesystem.BlobStorageSummary, commitments
}
}
return ids
}
}

View File

@@ -216,4 +216,4 @@ func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.S
}
return rp, nil
}
}

View File

@@ -44,7 +44,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
return err
}
// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
sort.Sort(blobIdents)
sort.Sort(&blobIdents)
batchSize := flags.Get().BlobBatchLimit
var ticker *time.Ticker

View File

@@ -190,7 +190,7 @@ func TestBlobsByRootValidation(t *testing.T) {
}()
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
dmc, clock := defaultMockChain(t)
dmc, clock := defaultMockChain(t, 0)
dmc.Slot = &capellaSlot
dmc.FinalizedCheckPoint = &ethpb.Checkpoint{Epoch: params.BeaconConfig().CapellaForkEpoch}
cases := []*blobsTestCase{

View File

@@ -36,12 +36,12 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Check if the message type is the one expected.
ref, ok := msg.(*types.DataColumnsByRootIdentifiers)
ref, ok := msg.(types.DataColumnsByRootIdentifiers)
if !ok {
return notDataColumnsByRootIdentifiersError
}
requestedColumnIdents := *ref
requestedColumnIdents := ref
remotePeer := stream.Conn().RemotePeer()
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)

View File

@@ -68,7 +68,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
msg := &types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
msg := types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
@@ -169,7 +169,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
msg := &types.DataColumnsByRootIdentifiers{
msg := types.DataColumnsByRootIdentifiers{
{
BlockRoot: root0[:],
Columns: []uint64{1, 2, 3},

View File

@@ -22,6 +22,7 @@ import (
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -404,11 +405,8 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRangeRequest(
ctx context.Context,
tor blockchain.TemporalOracle,
p2pApi p2p.P2P,
p DataColumnSidecarsParams,
pid peer.ID,
ctxMap ContextByteVersions,
request *ethpb.DataColumnSidecarsByRangeRequest,
) ([]blocks.RODataColumn, error) {
// Return early if nothing to request.
@@ -428,7 +426,7 @@ func SendDataColumnSidecarsByRangeRequest(
}
// Build the topic.
currentSlot := tor.CurrentSlot()
currentSlot := p.Tor.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
if err != nil {
@@ -453,7 +451,7 @@ func SendDataColumnSidecarsByRangeRequest(
})
// Send the request.
stream, err := p2pApi.Send(ctx, request, topic, pid)
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
if err != nil {
return nil, errors.Wrap(err, "p2p send")
}
@@ -463,7 +461,7 @@ func SendDataColumnSidecarsByRangeRequest(
roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
for range totalCount {
// Avoid reading extra chunks if the context is done.
if err := ctx.Err(); err != nil {
if err := p.Ctx.Err(); err != nil {
return nil, err
}
@@ -473,7 +471,7 @@ func SendDataColumnSidecarsByRangeRequest(
}
roDataColumn, err := readChunkedDataColumnSidecar(
stream, p2pApi, ctxMap,
stream, p.P2P, p.CtxMap,
validatorSlotWithinBounds,
isSidecarIndexRequested(request),
)
@@ -492,7 +490,7 @@ func SendDataColumnSidecarsByRangeRequest(
}
// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
}
@@ -539,22 +537,10 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRootRequest(
ctx context.Context,
tor blockchain.TemporalOracle,
p2pApi p2p.P2P,
pid peer.ID,
ctxMap ContextByteVersions,
request p2ptypes.DataColumnsByRootIdentifiers,
) ([]blocks.RODataColumn, error) {
// Return early if the request is nil.
if request == nil {
return nil, nil
}
func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer.ID, identifiers p2ptypes.DataColumnsByRootIdentifiers) ([]blocks.RODataColumn, error) {
// Compute how many sidecars are requested.
count := uint64(0)
for _, identifier := range request {
for _, identifier := range identifiers {
count += uint64(len(identifier.Columns))
}
@@ -570,13 +556,15 @@ func SendDataColumnSidecarsByRootRequest(
}
// Get the topic for the request.
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
currentSlot := p.Tor.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, currentEpoch)
if err != nil {
return nil, errors.Wrap(err, "topic from message")
}
// Send the request to the peer.
stream, err := p2pApi.Send(ctx, request, topic, pid)
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
if err != nil {
return nil, errors.Wrap(err, "p2p api send")
}
@@ -587,7 +575,7 @@ func SendDataColumnSidecarsByRootRequest(
// Read the data column sidecars from the stream.
for range count {
roDataColumn, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, isSidecarIndexRootRequested(request))
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
if errors.Is(err, io.EOF) {
return roDataColumns, nil
}
@@ -603,7 +591,7 @@ func SendDataColumnSidecarsByRootRequest(
}
// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
}
@@ -629,11 +617,11 @@ func isSidecarIndexRootRequested(request p2ptypes.DataColumnsByRootIdentifiers)
indices, ok := columnsIndexFromRoot[root]
if !ok {
return errors.Errorf("root #%x returned by peer but not requested", root)
return errors.Errorf("root %#x returned by peer but not requested", root)
}
if !indices[index] {
return errors.Errorf("index %d for root #%x returned by peer but not requested", index, root)
return errors.Errorf("index %d for root %#x returned by peer but not requested", index, root)
}
return nil

View File

@@ -915,7 +915,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
for _, tc := range nilTestCases {
t.Run(tc.name, func(t *testing.T) {
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
actual, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
require.NoError(t, err)
require.IsNil(t, actual)
})
@@ -928,7 +928,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
params.OverrideBeaconConfig(beaconConfig)
request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
_, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
_, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
})
@@ -1040,7 +1040,14 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
assert.NoError(t, err)
})
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, requestSent)
parameters := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p1,
CtxMap: ctxMap,
}
actual, err := SendDataColumnSidecarsByRangeRequest(parameters, p2.PeerID(), requestSent)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {
@@ -1208,7 +1215,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
for _, tc := range nilTestCases {
t.Run(tc.name, func(t *testing.T) {
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
actual, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
require.NoError(t, err)
require.IsNil(t, actual)
})
@@ -1225,7 +1232,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
{Columns: []uint64{4, 5, 6}},
}
_, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
_, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
})
@@ -1346,7 +1353,13 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
assert.NoError(t, err)
})
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, sentRequest)
parameters := DataColumnSidecarsParams{
Ctx: t.Context(),
Tor: clock,
P2P: p1,
CtxMap: ctxMap,
}
actual, err := SendDataColumnSidecarsByRootRequest(parameters, p2.PeerID(), sentRequest)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {

View File

@@ -38,7 +38,10 @@ func (s *Service) maintainPeerStatuses() {
go func(id peer.ID) {
defer wg.Done()
log := log.WithField("peer", id)
log := log.WithFields(logrus.Fields{
"peer": id,
"agent": agentString(id, s.cfg.p2p.Host()),
})
// If our peer status has not been updated correctly we disconnect over here
// and set the connection state over here instead.

View File

@@ -123,6 +123,7 @@ type blockchainService interface {
blockchain.OptimisticModeFetcher
blockchain.SlashingReceiver
blockchain.ForkchoiceFetcher
blockchain.DataAvailabilityChecker
}
// Service is responsible for handling all run time p2p related operations as the

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"reflect"
"runtime/debug"
"slices"
"strings"
"time"
@@ -230,6 +231,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
handle: s.dataColumnSubscriber,
digest: digest,
getSubnetsToJoin: s.dataColumnSubnetIndices,
// TODO: Should we find peers always? When validators are managed? When validators are managed AND when we are going to propose a block?
})
}
}
@@ -798,3 +800,17 @@ func errorIsIgnored(err error) bool {
}
return false
}
// sliceFromMap returns the keys of a map as a slice, sorted when the optional sorted flag is set to true.
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
result := make([]uint64, 0, len(m))
for k := range m {
result = append(result, k)
}
if len(sorted) > 0 && sorted[0] {
slices.Sort(result)
}
return result
}
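A minimal usage sketch for the helper above, from within the sync package since it is unexported; the map contents and variable names are illustrative only:
custodySubnets := map[uint64]bool{7: true, 2: true, 5: true}
unordered := sliceFromMap(custodySubnets)     // key order follows map iteration, e.g. [7 2 5]
ordered := sliceFromMap(custodySubnets, true) // [2 5 7]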

View File

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/io/file"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
@@ -108,6 +109,18 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
log.Warning("Data column storage is not enabled, skip saving data column, but continue to reconstruct and broadcast data column")
}
// Check if data is already available to avoid unnecessary execution client calls
switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, roSignedBlock); {
case err == nil:
log.Debug("Data already available skipping execution-client call")
return
case errors.Is(err, blockchain.ErrDataNotAvailable):
// continue
default:
log.WithError(err).Error("Failed to check data availability")
return
}
// This function is called at the time the block is received, so in almost all cases the data columns must be fetched from the EL rather than from blob storage.
sidecars, err := s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, roSignedBlock, blockRoot)
if err != nil {

View File

@@ -17,6 +17,7 @@ import (
lruwrpr "github.com/OffchainLabs/prysm/v6/cache/lru"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -214,8 +215,11 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
chainService := &chainMock.ChainService{
Genesis: time.Now(),
// Create a chain service that returns ErrDataNotAvailable to trigger execution service calls
chainService := &ChainServiceDataNotAvailable{
ChainService: &chainMock.ChainService{
Genesis: time.Now(),
},
}
allColumns := make([]blocks.VerifiedRODataColumn, 128)
@@ -295,3 +299,193 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
})
}
// TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck tests the data availability optimization
func TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
ctx := context.Background()
// Create a test block with KZG commitments
block := util.NewBeaconBlockDeneb()
block.Block.Slot = 100
commitment := [48]byte{1, 2, 3}
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
t.Run("skips execution call when data is available", func(t *testing.T) {
mockChain := &MockChainServiceTrackingCalls{
ChainService: &chainMock.ChainService{},
dataAvailable: true, // Data is available
availabilityError: nil,
isDataAvailableCalled: false,
}
mockExecutionClient := &MockExecutionClientTrackingCalls{
EngineClient: &mockExecution.EngineClient{},
reconstructCalled: false,
}
s := &Service{
cfg: &config{
chain: mockChain,
executionReconstructor: mockExecutionClient,
},
}
// This should call IsDataAvailable and return early without calling execution client
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
// Verify the expected call pattern
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when data is available")
})
t.Run("returns early when IsDataAvailable returns error", func(t *testing.T) {
mockChain := &MockChainServiceTrackingCalls{
ChainService: &chainMock.ChainService{},
dataAvailable: false, // This should be ignored due to error
availabilityError: errors.New("test error from IsDataAvailable"),
isDataAvailableCalled: false,
}
mockExecutionClient := &MockExecutionClientTrackingCalls{
EngineClient: &mockExecution.EngineClient{},
reconstructCalled: false,
}
s := &Service{
cfg: &config{
chain: mockChain,
executionReconstructor: mockExecutionClient,
},
}
// This should call IsDataAvailable, get an error, and return early without calling execution client
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
// Verify the expected call pattern
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when IsDataAvailable returns error")
})
t.Run("calls execution client when data not available", func(t *testing.T) {
mockChain := &MockChainServiceTrackingCalls{
ChainService: &chainMock.ChainService{},
dataAvailable: false, // Data not available
availabilityError: nil,
isDataAvailableCalled: false,
}
mockExecutionClient := &MockExecutionClientTrackingCalls{
EngineClient: &mockExecution.EngineClient{
DataColumnSidecars: []blocks.VerifiedRODataColumn{}, // Empty response is fine for this test
},
reconstructCalled: false,
}
s := &Service{
cfg: &config{
chain: mockChain,
executionReconstructor: mockExecutionClient,
},
}
// This should call IsDataAvailable, get false, and proceed to call execution client
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
// Verify the expected call pattern
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
assert.Equal(t, true, mockExecutionClient.reconstructCalled, "Expected execution client to be called when data is not available")
})
t.Run("returns early when block has no KZG commitments", func(t *testing.T) {
// Create a block without KZG commitments
blockNoCommitments := util.NewBeaconBlockDeneb()
blockNoCommitments.Block.Slot = 100
blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments
signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
require.NoError(t, err)
mockChain := &MockChainServiceTrackingCalls{
ChainService: &chainMock.ChainService{},
dataAvailable: false,
availabilityError: nil,
isDataAvailableCalled: false,
}
mockExecutionClient := &MockExecutionClientTrackingCalls{
EngineClient: &mockExecution.EngineClient{},
reconstructCalled: false,
}
s := &Service{
cfg: &config{
chain: mockChain,
executionReconstructor: mockExecutionClient,
},
}
// This should return early before checking data availability or calling execution client
s.processDataColumnSidecarsFromExecution(ctx, signedBlockNoCommitments)
// Verify neither method was called since there are no commitments
assert.Equal(t, false, mockChain.isDataAvailableCalled, "Expected IsDataAvailable NOT to be called when no KZG commitments")
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when no KZG commitments")
})
}
// MockChainServiceTrackingCalls tracks calls to IsDataAvailable for testing
type MockChainServiceTrackingCalls struct {
isDataAvailableCalled bool
dataAvailable bool
*chainMock.ChainService
availabilityError error
}
func (m *MockChainServiceTrackingCalls) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
m.isDataAvailableCalled = true
if m.availabilityError != nil {
return m.availabilityError
}
if !m.dataAvailable {
return blockchain.ErrDataNotAvailable
}
return nil
}
// MockExecutionClientTrackingCalls tracks calls to ReconstructDataColumnSidecars for testing
type MockExecutionClientTrackingCalls struct {
*mockExecution.EngineClient
reconstructCalled bool
}
func (m *MockExecutionClientTrackingCalls) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
m.reconstructCalled = true
return m.EngineClient.DataColumnSidecars, m.EngineClient.ErrorDataColumnSidecars
}
func (m *MockExecutionClientTrackingCalls) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
return m.EngineClient.ReconstructFullBlock(ctx, blindedBlock)
}
func (m *MockExecutionClientTrackingCalls) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
return m.EngineClient.ReconstructFullBellatrixBlockBatch(ctx, blindedBlocks)
}
func (m *MockExecutionClientTrackingCalls) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hasIndex func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
return m.EngineClient.ReconstructBlobSidecars(ctx, block, blockRoot, hasIndex)
}
// ChainServiceDataNotAvailable wraps ChainService and overrides IsDataAvailable to return ErrDataNotAvailable
type ChainServiceDataNotAvailable struct {
*chainMock.ChainService
}
func (c *ChainServiceDataNotAvailable) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
return blockchain.ErrDataNotAvailable
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -29,6 +30,11 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return errors.Wrap(err, "reconstruct/save/broadcast data column sidecars")
}
// Trigger getBlobsV2 when receiving data column sidecar
if err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, root); err != nil {
return errors.Wrap(err, "failed to trigger getBlobsV2 for data column sidecar")
}
return nil
}
@@ -52,3 +58,55 @@ func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.V
return nil
}
// triggerGetBlobsV2ForDataColumnSidecar triggers getBlobsV2 retry when receiving a data column sidecar.
// This function attempts to fetch the block and trigger the execution service's retry mechanism.
func (s *Service) triggerGetBlobsV2ForDataColumnSidecar(ctx context.Context, blockRoot [32]byte) error {
// Get the specific block by root from database
signedBlock, err := s.cfg.beaconDB.Block(ctx, blockRoot)
if err != nil {
log.WithError(err).Debug("Could not fetch block from database for getBlobsV2 retry trigger")
return nil
}
if signedBlock == nil || signedBlock.IsNil() {
log.Debug("Block not found in database for getBlobsV2 retry trigger")
return nil
}
// Check if this block has blob commitments that would need getBlobsV2
blockBody := signedBlock.Block().Body()
commitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return err
}
if len(commitments) == 0 {
return nil
}
// Check if data is already available
switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, signedBlock); {
case err == nil:
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Data already available, skipping getBlobsV2 retry")
return nil
case errors.Is(err, blockchain.ErrDataNotAvailable):
// fall through and trigger getBlobsV2.
default:
return errors.Wrap(err, "Error checking data availability during getBlobsV2 trigger")
}
// Trigger the retry by calling the execution service's reconstruct method
// ReconstructDataColumnSidecars handles concurrent calls internally
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Triggering getBlobsV2 retry for data column sidecar")
if s.cfg.executionReconstructor == nil {
return nil
}
_, err = s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
if err != nil {
return errors.Wrap(err, "getBlobsV2 retry triggered by data column sidecar failed")
}
return nil
}

View File

@@ -0,0 +1,326 @@
package sync
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
blockchaintesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/pkg/errors"
)
// TestDataColumnSubscriber_InvalidMessage tests error handling for invalid messages
func TestDataColumnSubscriber_InvalidMessage(t *testing.T) {
s := &Service{}
// Test with invalid message type (use a proto message that's not VerifiedRODataColumn)
invalidMsg := &ethpb.SignedBeaconBlock{}
err := s.dataColumnSubscriber(context.Background(), invalidMsg)
require.ErrorContains(t, "message was not type blocks.VerifiedRODataColumn", err)
}
// TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability tests block availability checking
func TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability(t *testing.T) {
ctx := context.Background()
blockRoot := [32]byte{1, 2, 3}
// Test when block is not available
t.Run("block not available", func(t *testing.T) {
mockChain := &blockchaintesting.ChainService{}
db := dbtesting.SetupDB(t)
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.NoError(t, err)
})
// Test when HasBlock returns true but block is not in database
t.Run("HasBlock true but not in database", func(t *testing.T) {
mockChain := &blockchaintesting.ChainService{}
// Mock HasBlock to return true
mockChain.CanonicalRoots = map[[32]byte]bool{blockRoot: true}
db := dbtesting.SetupDB(t)
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.NoError(t, err)
})
}
// TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock tests with a valid block
func TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock(t *testing.T) {
ctx := context.Background()
// Create a test block with KZG commitments
slot := primitives.Slot(100)
block := util.NewBeaconBlockDeneb()
block.Block.Slot = slot
// Add KZG commitments to trigger getBlobsV2 retry logic
commitment := [48]byte{1, 2, 3}
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
blockRoot, err := signedBlock.Block().HashTreeRoot()
require.NoError(t, err)
t.Run("block with KZG commitments triggers retry", func(t *testing.T) {
// Mock execution reconstructor to track calls
mockReconstructor := &MockExecutionReconstructor{
reconstructCalled: false,
}
db := dbtesting.SetupDB(t)
// Save block to database
require.NoError(t, db.SaveBlock(ctx, signedBlock))
// Mock chain service that reports data is NOT available (to trigger execution service)
mockChain := &MockChainServiceWithAvailability{
ChainService: &blockchaintesting.ChainService{DB: db},
dataAvailable: false, // Data not available, should trigger execution service
availabilityError: nil,
}
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
executionReconstructor: mockReconstructor,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.NoError(t, err)
// Wait a bit for the goroutine to execute
time.Sleep(10 * time.Millisecond)
// Verify that the execution reconstructor was called
if !mockReconstructor.reconstructCalled {
t.Errorf("Expected ReconstructDataColumnSidecars to be called")
}
})
t.Run("does not start retry if data already available", func(t *testing.T) {
// Mock execution reconstructor to track calls
mockReconstructor := &MockExecutionReconstructor{
reconstructCalled: false,
}
db := dbtesting.SetupDB(t)
// Save block to database
require.NoError(t, db.SaveBlock(ctx, signedBlock))
// Mock chain service that reports data is already available
mockChain := &MockChainServiceWithAvailability{
ChainService: &blockchaintesting.ChainService{DB: db},
dataAvailable: true,
availabilityError: nil,
}
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
executionReconstructor: mockReconstructor,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.NoError(t, err)
// Wait a bit to ensure no goroutine was started
time.Sleep(10 * time.Millisecond)
// Verify that the execution reconstructor was NOT called since data is already available
if mockReconstructor.reconstructCalled {
t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when data is already available")
}
})
t.Run("calls execution service when data not available", func(t *testing.T) {
// Mock execution reconstructor to track calls
mockReconstructor := &MockExecutionReconstructor{
reconstructCalled: false,
}
db := dbtesting.SetupDB(t)
// Save block to database
require.NoError(t, db.SaveBlock(ctx, signedBlock))
// Mock chain service that returns ErrDataNotAvailable
mockChain := &MockChainServiceWithAvailability{
ChainService: &blockchaintesting.ChainService{DB: db},
dataAvailable: false, // Data not available
availabilityError: blockchain.ErrDataNotAvailable, // Should trigger execution service call
}
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
executionReconstructor: mockReconstructor,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.NoError(t, err) // Function should succeed and call execution service
// Wait a bit for the goroutine to execute
time.Sleep(10 * time.Millisecond)
// Verify that the execution reconstructor was called
if !mockReconstructor.reconstructCalled {
t.Errorf("Expected ReconstructDataColumnSidecars to be called when data is not available")
}
})
t.Run("returns error when availability check returns error", func(t *testing.T) {
// Mock execution reconstructor to track calls
mockReconstructor := &MockExecutionReconstructor{
reconstructCalled: false,
}
db := dbtesting.SetupDB(t)
// Save block to database
require.NoError(t, db.SaveBlock(ctx, signedBlock))
// Mock chain service that returns an error for availability check
mockChain := &MockChainServiceWithAvailability{
ChainService: &blockchaintesting.ChainService{DB: db},
dataAvailable: false, // This should be ignored due to error
availabilityError: errors.New("availability check error"), // Error should cause function to return error
}
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
executionReconstructor: mockReconstructor,
},
}
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
require.ErrorContains(t, "availability check error", err) // Function should return the availability check error
// Verify that the execution reconstructor was NOT called since function returned early with error
if mockReconstructor.reconstructCalled {
t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when availability check returns error")
}
})
t.Run("block without KZG commitments does not trigger retry", func(t *testing.T) {
// Create block without KZG commitments
blockNoCommitments := util.NewBeaconBlockDeneb()
blockNoCommitments.Block.Slot = slot
blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments
signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
require.NoError(t, err)
blockRootNoCommitments, err := signedBlockNoCommitments.Block().HashTreeRoot()
require.NoError(t, err)
mockReconstructor := &MockExecutionReconstructor{
reconstructCalled: false,
}
db := dbtesting.SetupDB(t)
// Save block to database
require.NoError(t, db.SaveBlock(ctx, signedBlockNoCommitments))
mockChain := &blockchaintesting.ChainService{
DB: db, // Set the DB so HasBlock can find the block
}
s := &Service{
cfg: &config{
chain: mockChain,
beaconDB: db,
executionReconstructor: mockReconstructor,
},
}
err = s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRootNoCommitments)
require.NoError(t, err)
// Wait a bit to ensure no goroutine was started
time.Sleep(10 * time.Millisecond)
// Verify that the execution reconstructor was NOT called
if mockReconstructor.reconstructCalled {
t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called for block without commitments")
}
})
}
// MockExecutionReconstructor is a mock implementation for testing
type MockExecutionReconstructor struct {
reconstructCalled bool
reconstructError error
reconstructResult []blocks.VerifiedRODataColumn
}
func (m *MockExecutionReconstructor) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
return nil, nil
}
func (m *MockExecutionReconstructor) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
return nil, nil
}
func (m *MockExecutionReconstructor) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
return nil, nil
}
func (m *MockExecutionReconstructor) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
m.reconstructCalled = true
return m.reconstructResult, m.reconstructError
}
// MockChainServiceWithAvailability wraps the testing ChainService to allow configuring IsDataAvailable
type MockChainServiceWithAvailability struct {
*blockchaintesting.ChainService
dataAvailable bool
availabilityError error
}
// IsDataAvailable overrides the default implementation to return configurable values for testing
func (m *MockChainServiceWithAvailability) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
if m.availabilityError != nil {
return m.availabilityError
}
if !m.dataAvailable {
return blockchain.ErrDataNotAvailable
}
return nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -48,7 +49,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
// Decode the message, reject if it fails.
// Decode the message.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
@@ -68,6 +69,20 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, errors.Wrap(err, "roDataColumn conversion failure")
}
// Voluntary ignore messages (for debugging purposes).
dataColumnsIgnoreSlotMultiple := features.Get().DataColumnsIgnoreSlotMultiple
blockSlot := uint64(roDataColumn.SignedBlockHeader.Header.Slot)
if dataColumnsIgnoreSlotMultiple != 0 && blockSlot%dataColumnsIgnoreSlotMultiple == 0 {
log.WithFields(logrus.Fields{
"slot": blockSlot,
"columnIndex": roDataColumn.Index,
"blockRoot": fmt.Sprintf("%#x", roDataColumn.BlockRoot()),
}).Warning("Voluntary ignore data column sidecar gossip")
return pubsub.ValidationIgnore, err
}
// Compute a batch of only one data column sidecar.
roDataColumns := []blocks.RODataColumn{roDataColumn}

View File

@@ -17,9 +17,12 @@ var (
// BlobAlignsWithBlock verifies if the blob aligns with the block.
func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
if block.Version() < version.Deneb {
blockVersion := block.Version()
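// Pre-Deneb blocks carry no blobs, and Fulu and later blocks carry data column sidecars instead of blob sidecars, so there is nothing to align in either case.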
if blockVersion < version.Deneb || blockVersion >= version.Fulu {
return nil
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blob.Slot())
if blob.Index >= uint64(maxBlobsPerBlock) {
return errors.Wrapf(ErrIncorrectBlobIndex, "index %d exceeds MAX_BLOBS_PER_BLOCK %d", blob.Index, maxBlobsPerBlock)

View File

@@ -38,6 +38,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -47,6 +47,15 @@ var (
RequireSidecarKzgProofVerified,
}
// ByRootRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
// via the by root request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
ByRootRequestDataColumnSidecarRequirements = []Requirement{
RequireValidFields,
RequireSidecarInclusionProven,
RequireSidecarKzgProofVerified,
}
errColumnsInvalid = errors.New("data columns failed verification")
errBadTopicLength = errors.New("topic length is invalid")
errBadTopic = errors.New("topic is not of the one expected")
@@ -531,4 +540,4 @@ func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {
copy(key[128:], root[:])
return key, nil
}
}

View File

@@ -3,7 +3,24 @@ package verification
import (
"testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)
type (
DataColumnParams struct {
Slot primitives.Slot
ColumnIndex uint64
KzgCommitments [][]byte
DataColumn []byte // Each item of this slice fills one whole data cell with its byte value.
}
DataColumnsParamsByRoot map[[fieldparams.RootLength]byte][]DataColumnParams
)
// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the
@@ -25,3 +42,85 @@ func FakeVerifySliceForTest(t *testing.T, b []blocks.ROBlob) []blocks.VerifiedRO
}
return vbs
}
// FakeVerifyDataColumnForTest can be used by tests that need a VerifiedRODataColumn but don't want to do all the
// expensive set up to perform full validation.
func FakeVerifyDataColumnForTest(t *testing.T, b blocks.RODataColumn) blocks.VerifiedRODataColumn {
// log so that t is truly required
t.Log("producing fake VerifiedRODataColumn for a test")
return blocks.NewVerifiedRODataColumn(b)
}
// FakeVerifyDataColumnSliceForTest can be used by tests that need a []VerifiedRODataColumn but don't want to do all the
// expensive set up to perform full validation.
func FakeVerifyDataColumnSliceForTest(t *testing.T, dcs []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
// Log so that `t` is truly required.
t.Log("producing fake []VerifiedRODataColumn for a test")
vcs := make([]blocks.VerifiedRODataColumn, 0, len(dcs))
for _, dc := range dcs {
vcs = append(vcs, blocks.NewVerifiedRODataColumn(dc))
}
return vcs
}
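// CreateTestVerifiedRoDataColumnSidecars builds RODataColumn and VerifiedRODataColumn sidecars from the
// given per-block-root parameters. It is intended for tests only: sidecars are promoted to verified
// without running the full verification pipeline, and FuluForkEpoch is overridden to 0 for the test config.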
func CreateTestVerifiedRoDataColumnSidecars(t *testing.T, dataColumnParamsByBlockRoot DataColumnsParamsByRoot) ([]blocks.RODataColumn, []blocks.VerifiedRODataColumn) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
count := 0
for _, indices := range dataColumnParamsByBlockRoot {
count += len(indices)
}
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, count)
rodataColumnSidecars := make([]blocks.RODataColumn, 0, count)
for blockRoot, params := range dataColumnParamsByBlockRoot {
for _, param := range params {
dataColumn := make([][]byte, 0, len(param.DataColumn))
for _, value := range param.DataColumn {
cell := make([]byte, ckzg4844.BytesPerCell)
for i := range ckzg4844.BytesPerCell {
cell[i] = value
}
dataColumn = append(dataColumn, cell)
}
kzgCommitmentsInclusionProof := make([][]byte, 4)
for i := range kzgCommitmentsInclusionProof {
kzgCommitmentsInclusionProof[i] = make([]byte, 32)
}
dataColumnSidecar := &ethpb.DataColumnSidecar{
Index: param.ColumnIndex,
KzgCommitments: param.KzgCommitments,
Column: dataColumn,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: param.Slot,
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
}
roDataColumnSidecar, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
t.Fatal(err)
}
rodataColumnSidecars = append(rodataColumnSidecars, roDataColumnSidecar)
verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
}
}
return rodataColumnSidecars, verifiedRoDataColumnSidecars
}
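A minimal usage sketch for CreateTestVerifiedRoDataColumnSidecars from an external test package; the package qualifier, test name, imports, and all values are illustrative assumptions, not part of this diff:
func TestSidecarFixtureSketch(t *testing.T) { // hypothetical test, for illustration only
	root := [fieldparams.RootLength]byte{1}
	paramsByRoot := verification.DataColumnsParamsByRoot{
		root: {
			{Slot: 10, ColumnIndex: 0, KzgCommitments: [][]byte{make([]byte, 48)}, DataColumn: []byte{0xaa}},
			{Slot: 10, ColumnIndex: 1, KzgCommitments: [][]byte{make([]byte, 48)}, DataColumn: []byte{0xbb}},
		},
	}
	roSidecars, verifiedSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(t, paramsByRoot)
	_ = roSidecars       // e.g. feed to a handler under test
	_ = verifiedSidecars // e.g. pre-populate a data column storage mock
}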

View File

@@ -0,0 +1,2 @@
### Added
- Data column syncing for Fusaka.

View File

@@ -2,4 +2,4 @@
- **Gzip Compression for Beacon API:**
Fixed an issue where the beacon chain server ignored the `Accept-Encoding: gzip` header and returned uncompressed JSON responses. With this change, endpoints that use the `AcceptHeaderHandler` now also compress responses when a client requests gzip encoding.
Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).
Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).

View File

@@ -216,6 +216,7 @@ var (
DataColumnBatchLimit = &cli.IntFlag{
Name: "data-column-batch-limit",
Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
// TODO: determine a good default value for this flag.
Value: 4096,
}
// DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.

View File

@@ -42,7 +42,7 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
cfg := &GlobalFlags{}
if ctx.Bool(SubscribeToAllSubnets.Name) {
log.Warn("Subscribing to All Attestation Subnets")
log.Warning("Subscribing to all attestation subnets")
cfg.SubscribeToAllSubnets = true
}

View File

@@ -59,10 +59,13 @@ var appFlags = []cli.Flag{
flags.BlockBatchLimitBurstFactor,
flags.BlobBatchLimit,
flags.BlobBatchLimitBurstFactor,
flags.DataColumnBatchLimit,
flags.DataColumnBatchLimitBurstFactor,
flags.InteropMockEth1DataVotesFlag,
flags.SlotsPerArchivedPoint,
flags.DisableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.SubscribeAllDataSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
flags.NetworkID,
@@ -84,6 +87,7 @@ var appFlags = []cli.Flag{
flags.BeaconDBPruning,
flags.PrunerRetentionEpochs,
flags.EnableBuilderSSZ,
flags.SubscribeAllDataSubnets,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.RPCMaxPageSizeFlag,
@@ -143,6 +147,7 @@ var appFlags = []cli.Flag{
storage.BlobStoragePathFlag,
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
storage.DataColumnStoragePathFlag,
bflags.EnableExperimentalBackfill,
bflags.BackfillBatchSize,
bflags.BackfillWorkerCount,

View File

@@ -61,3 +61,12 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
_, err = blobRetentionEpoch(cliCtx)
require.ErrorIs(t, err, errInvalidBlobRetentionEpochs)
}
func TestDataColumnStoragePath_FlagSpecified(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(DataColumnStoragePathFlag.Name, "/blah/blah", DataColumnStoragePathFlag.Usage)
cliCtx := cli.NewContext(&app, set, nil)
storagePath := dataColumnStoragePath(cliCtx)
assert.Equal(t, "/blah/blah", storagePath)
}

View File

@@ -98,12 +98,15 @@ var appHelpFlagGroups = []flagGroup{
cmd.StaticPeers,
flags.BlobBatchLimit,
flags.BlobBatchLimitBurstFactor,
flags.DataColumnBatchLimit,
flags.DataColumnBatchLimitBurstFactor,
flags.BlockBatchLimit,
flags.BlockBatchLimitBurstFactor,
flags.MaxConcurrentDials,
flags.MinPeersPerSubnet,
flags.MinSyncPeers,
flags.SubscribeToAllSubnets,
flags.SubscribeAllDataSubnets,
},
},
{ // Flags relevant to storing data on disk and configuring the beacon chain database.
@@ -124,6 +127,7 @@ var appHelpFlagGroups = []flagGroup{
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
storage.BlobStoragePathFlag,
storage.DataColumnStoragePathFlag,
},
},
{ // Flags relevant to configuring local block production or external builders such as mev-boost.

View File

@@ -85,6 +85,12 @@ type Flags struct {
// changed on disk. This feature is for advanced use cases only.
KeystoreImportDebounceInterval time.Duration
// DataColumnsWithholdCount specifies the number of data columns that should be withheld when proposing a block.
DataColumnsWithholdCount uint64
// DataColumnsIgnoreSlotMultiple specifies that data columns should be ignored for slots that are a multiple of this value.
DataColumnsIgnoreSlotMultiple uint64
// AggregateIntervals specifies the time durations at which we aggregate attestations preparing for forkchoice.
AggregateIntervals [3]time.Duration
@@ -280,6 +286,16 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
}
if ctx.IsSet(DataColumnsWithholdCount.Name) {
logEnabled(DataColumnsWithholdCount)
cfg.DataColumnsWithholdCount = ctx.Uint64(DataColumnsWithholdCount.Name)
}
if ctx.IsSet(DataColumnsIgnoreSlotMultiple.Name) {
logEnabled(DataColumnsIgnoreSlotMultiple)
cfg.DataColumnsIgnoreSlotMultiple = ctx.Uint64(DataColumnsIgnoreSlotMultiple.Name)
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil
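A rough sketch of how these two debug knobs could be consulted elsewhere in the client. The helper names are hypothetical and the features.Get() accessor and import path are assumptions based on the package shown above:

import "github.com/OffchainLabs/prysm/v6/config/features"

// shouldIgnoreDataColumns is a hypothetical helper: data columns are dropped for
// every slot that is a multiple of the configured value (0 leaves the behavior disabled).
func shouldIgnoreDataColumns(slot uint64) bool {
	multiple := features.Get().DataColumnsIgnoreSlotMultiple
	return multiple != 0 && slot%multiple == 0
}

// dataColumnsToWithhold is a hypothetical helper that caps the configured withhold
// count at the number of columns actually produced for the block.
func dataColumnsToWithhold(totalColumns uint64) uint64 {
	withhold := features.Get().DataColumnsWithholdCount
	if withhold > totalColumns {
		return totalColumns
	}
	return withhold
}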

View File

@@ -172,6 +172,20 @@ var (
Name: "enable-experimental-attestation-pool",
Usage: "Enables an experimental attestation pool design.",
}
// DataColumnsWithholdCount is a flag for withholding data columns when proposing a block.
DataColumnsWithholdCount = &cli.Uint64Flag{
Name: "data-columns-withhold-count",
Usage: "Number of columns to withhold when proposing a block. DO NOT USE IN PRODUCTION.",
Value: 0,
Hidden: true,
}
// DataColumnsIgnoreSlotMultiple is a flag for ignoring data columns at slots that are a multiple of a given value.
DataColumnsIgnoreSlotMultiple = &cli.Uint64Flag{
Name: "data-columns-ignore-slot-multiple",
Usage: "Ignore all data columns for slots that are a multiple of this value. DO NOT USE IN PRODUCTION.",
Value: 0,
Hidden: true,
}
// forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
forceHeadFlag = &cli.StringFlag{
Name: "sync-from",
@@ -255,6 +269,8 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
DisableQUIC,
EnableDiscoveryReboot,
enableExperimentalAttestationPool,
DataColumnsWithholdCount,
DataColumnsIgnoreSlotMultiple,
forceHeadFlag,
blacklistRoots,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

View File

@@ -46,6 +46,9 @@ const (
MaxRandomValueElectra = uint64(1<<16 - 1) // MaxRandomValueElectra defines the max for a random value used for proposer and sync committee sampling.
// Introduced in Fulu network upgrade.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
CellsPerBlob = 64 // CellsPerBlob refers to the number of cells in a (non-extended) blob.
FieldElementsPerCell = 64 // FieldElementsPerCell refers to the number of field elements in a cell.
BytesPerFieldElement = 32 // BytesPerFieldElement refers to the number of bytes in a field element.
BytesPerCells = FieldElementsPerCell * BytesPerFieldElement // BytesPerCells refers to the number of bytes in a cell.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
)

View File

@@ -46,6 +46,9 @@ const (
MaxRandomValueElectra = uint64(1<<16 - 1) // Maximum value for a random value used for proposer and sync committee sampling.
// Introduced in Fulu network upgrade.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
CellsPerBlob = 64 // CellsPerBlob refers to the number of cells in a (non-extended) blob.
FieldElementsPerCell = 64 // FieldElementsPerCell refers to the number of field elements in a cell.
BytesPerFieldElement = 32 // BytesPerFieldElement refers to the number of bytes in a field element.
BytesPerCells = FieldElementsPerCell * BytesPerFieldElement // BytesPerCells refers to the number of bytes in a cell.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
)
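As a quick sanity check on the arithmetic these constants encode (a standalone sketch, not part of the change): a cell holds FieldElementsPerCell × BytesPerFieldElement = 64 × 32 = 2048 bytes, and extending a blob doubles its 64 cells to 128, which matches NumberOfColumns.

package main

import "fmt"

func main() {
	const (
		cellsPerBlob         = 64 // cells in a non-extended blob
		fieldElementsPerCell = 64
		bytesPerFieldElement = 32
		numberOfColumns      = 128
	)
	fmt.Println("bytes per cell:", fieldElementsPerCell*bytesPerFieldElement)        // 2048
	fmt.Println("cells per extended blob:", 2*cellsPerBlob)                          // 128
	fmt.Println("one column per extended cell:", 2*cellsPerBlob == numberOfColumns)  // true
}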

View File

@@ -13,17 +13,22 @@ const (
func SetupTestConfigCleanup(t testing.TB) {
prevDefaultBeaconConfig := mainnetBeaconConfig.Copy()
temp := configs.getActive().Copy()
undo, err := SetActiveWithUndo(temp)
if err != nil {
t.Fatal(err)
}
prevNetworkCfg := networkConfig.Copy()
t.Cleanup(func() {
mainnetBeaconConfig = prevDefaultBeaconConfig
err = undo()
if err != nil {
t.Fatal(err)
}
networkConfig = prevNetworkCfg
})
}
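Typical usage, mirroring the helper earlier in this diff: a test overrides the active config and relies on the registered cleanup to restore both the beacon and network configs afterwards (the test name is illustrative; the usual testing and params imports are assumed).

func TestWithFuluEnabled(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.FuluForkEpoch = 0
	params.OverrideBeaconConfig(cfg)
	// ... exercise code that depends on the Fulu config; cleanup restores the previous state.
}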

View File

@@ -13,7 +13,6 @@ go_library(
"roblob.go",
"roblock.go",
"rodatacolumn.go",
"rosidecar.go",
"setters.go",
"types.go",
],
@@ -54,7 +53,6 @@ go_test(
"roblob_test.go",
"roblock_test.go",
"rodatacolumn_test.go",
"rosidecar_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -74,6 +72,5 @@ go_test(
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
],
)

View File

@@ -1,6 +1,8 @@
package blocks
import (
"fmt"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -398,7 +400,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) { // nolint:gocognit
Body: body,
}, nil
default:
return nil, errors.New("unsupported beacon block version")
return nil, fmt.Errorf("unsupported beacon block version: %s", version.String(b.version))
}
}

View File

@@ -96,16 +96,17 @@ func (s ROBlockSlice) Len() int {
return len(s)
}
// BlockWithROBlobs is a wrapper that collects the block and blob values together.
// BlockWithROSidecars is a wrapper that collects the block and its sidecars (blobs and data columns) together.
// This is helpful because these values are collated from separate RPC requests.
type BlockWithROBlobs struct {
Block ROBlock
Blobs []ROBlob
type BlockWithROSidecars struct {
Block ROBlock
Blobs []ROBlob
Columns []VerifiedRODataColumn
}
// BlockWithROBlobsSlice gives convenient access to a slice of just the ROBlocks,
// and defines sorting helpers.
type BlockWithROBlobsSlice []BlockWithROBlobs
type BlockWithROBlobsSlice []BlockWithROSidecars
func (s BlockWithROBlobsSlice) ROBlocks() []ROBlock {
r := make([]ROBlock, len(s))

View File

@@ -66,16 +66,16 @@ func (dc *RODataColumn) Slot() primitives.Slot {
return dc.SignedBlockHeader.Header.Slot
}
// ParentRoot returns the parent root of the data column sidecar.
func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
}
// ProposerIndex returns the proposer index of the data column sidecar.
func (dc *RODataColumn) ProposerIndex() primitives.ValidatorIndex {
return dc.SignedBlockHeader.Header.ProposerIndex
}
// ParentRoot returns the parent root of the data column sidecar.
func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
}
// VerifiedRODataColumn represents an RODataColumn that has undergone full verification (e.g. block sig, inclusion proof, commitment check).
type VerifiedRODataColumn struct {
RODataColumn
@@ -84,4 +84,4 @@ type VerifiedRODataColumn struct {
// NewVerifiedRODataColumn "upgrades" an RODataColumn to a VerifiedRODataColumn. This method should only be used by the verification package.
func NewVerifiedRODataColumn(roDataColumn RODataColumn) VerifiedRODataColumn {
return VerifiedRODataColumn{RODataColumn: roDataColumn}
}

View File

@@ -1,96 +0,0 @@
package blocks
import (
"github.com/pkg/errors"
)
// ROSidecar represents a read-only sidecar with its block root.
type ROSidecar struct {
blob *ROBlob
dataColumn *RODataColumn
}
var (
errBlobNeeded = errors.New("blob sidecar needed")
errDataColumnNeeded = errors.New("data column sidecar needed")
)
// NewSidecarFromBlobSidecar creates a new read-only (generic) sidecar from a read-only blob sidecar.
func NewSidecarFromBlobSidecar(blob ROBlob) ROSidecar {
return ROSidecar{blob: &blob}
}
// NewSidecarFromDataColumnSidecar creates a new read-only (generic) sidecar from a read-only data column sidecar.
func NewSidecarFromDataColumnSidecar(dataColumn RODataColumn) ROSidecar {
return ROSidecar{dataColumn: &dataColumn}
}
// NewSidecarsFromBlobSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only blobs sidecars.
func NewSidecarsFromBlobSidecars(blobSidecars []ROBlob) []ROSidecar {
sidecars := make([]ROSidecar, 0, len(blobSidecars))
for _, blob := range blobSidecars {
blobSidecar := ROSidecar{blob: &blob} // #nosec G601
sidecars = append(sidecars, blobSidecar)
}
return sidecars
}
// NewSidecarsFromDataColumnSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only data column sidecars.
func NewSidecarsFromDataColumnSidecars(dataColumnSidecars []RODataColumn) []ROSidecar {
sidecars := make([]ROSidecar, 0, len(dataColumnSidecars))
for _, dataColumn := range dataColumnSidecars {
dataColumnSidecar := ROSidecar{dataColumn: &dataColumn} // #nosec G601
sidecars = append(sidecars, dataColumnSidecar)
}
return sidecars
}
// Blob returns the blob sidecar.
func (sc *ROSidecar) Blob() (ROBlob, error) {
if sc.blob == nil {
return ROBlob{}, errBlobNeeded
}
return *sc.blob, nil
}
// DataColumn returns the data column sidecar.
func (sc *ROSidecar) DataColumn() (RODataColumn, error) {
if sc.dataColumn == nil {
return RODataColumn{}, errDataColumnNeeded
}
return *sc.dataColumn, nil
}
// BlobSidecarsFromSidecars creates a new slice of read-only blobs sidecars from a slice of read-only (generic) sidecars.
func BlobSidecarsFromSidecars(sidecars []ROSidecar) ([]ROBlob, error) {
blobSidecars := make([]ROBlob, 0, len(sidecars))
for _, sidecar := range sidecars {
blob, err := sidecar.Blob()
if err != nil {
return nil, errors.Wrap(err, "blob")
}
blobSidecars = append(blobSidecars, blob)
}
return blobSidecars, nil
}
// DataColumnSidecarsFromSidecars creates a new slice of read-only data column sidecars from a slice of read-only (generic) sidecars.
func DataColumnSidecarsFromSidecars(sidecars []ROSidecar) ([]RODataColumn, error) {
dataColumnSidecars := make([]RODataColumn, 0, len(sidecars))
for _, sidecar := range sidecars {
dataColumn, err := sidecar.DataColumn()
if err != nil {
return nil, errors.Wrap(err, "data column")
}
dataColumnSidecars = append(dataColumnSidecars, dataColumn)
}
return dataColumnSidecars, nil
}

View File

@@ -1,109 +0,0 @@
package blocks
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNewSidecarFromBlobSidecar(t *testing.T) {
blob := ROBlob{}
sidecar := NewSidecarFromBlobSidecar(blob)
// Check that the blob is set
retrievedBlob, err := sidecar.Blob()
require.NoError(t, err)
require.Equal(t, blob, retrievedBlob)
// Check that data column is not set
_, err = sidecar.DataColumn()
require.ErrorIs(t, err, errDataColumnNeeded)
}
func TestNewSidecarFromDataColumnSidecar(t *testing.T) {
dataColumn := RODataColumn{}
sidecar := NewSidecarFromDataColumnSidecar(dataColumn)
// Check that the data column is set
retrievedDataColumn, err := sidecar.DataColumn()
require.NoError(t, err)
require.Equal(t, dataColumn, retrievedDataColumn)
// Check that blob is not set
_, err = sidecar.Blob()
require.ErrorIs(t, err, errBlobNeeded)
}
func TestNewSidecarsFromBlobSidecars(t *testing.T) {
blobSidecars := []ROBlob{{}, {}}
sidecars := NewSidecarsFromBlobSidecars(blobSidecars)
require.Equal(t, len(blobSidecars), len(sidecars))
for i, sidecar := range sidecars {
retrievedBlob, err := sidecar.Blob()
require.NoError(t, err)
require.Equal(t, blobSidecars[i], retrievedBlob)
}
}
func TestNewSidecarsFromDataColumnSidecars(t *testing.T) {
dataColumnSidecars := []RODataColumn{{}, {}}
sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)
require.Equal(t, len(dataColumnSidecars), len(sidecars))
for i, sidecar := range sidecars {
retrievedDataColumn, err := sidecar.DataColumn()
require.NoError(t, err)
require.Equal(t, dataColumnSidecars[i], retrievedDataColumn)
}
}
func TestBlobSidecarsFromSidecars(t *testing.T) {
// Create sidecars with blobs
blobSidecars := []ROBlob{{}, {}}
sidecars := NewSidecarsFromBlobSidecars(blobSidecars)
// Convert back to blob sidecars
retrievedBlobSidecars, err := BlobSidecarsFromSidecars(sidecars)
require.NoError(t, err)
require.Equal(t, len(blobSidecars), len(retrievedBlobSidecars))
for i, blob := range retrievedBlobSidecars {
require.Equal(t, blobSidecars[i], blob)
}
// Test with a mix of sidecar types
mixedSidecars := []ROSidecar{
NewSidecarFromBlobSidecar(ROBlob{}),
NewSidecarFromDataColumnSidecar(RODataColumn{}),
}
_, err = BlobSidecarsFromSidecars(mixedSidecars)
require.Error(t, err)
}
func TestDataColumnSidecarsFromSidecars(t *testing.T) {
// Create sidecars with data columns
dataColumnSidecars := []RODataColumn{{}, {}}
sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)
// Convert back to data column sidecars
retrievedDataColumnSidecars, err := DataColumnSidecarsFromSidecars(sidecars)
require.NoError(t, err)
require.Equal(t, len(dataColumnSidecars), len(retrievedDataColumnSidecars))
for i, dataColumn := range retrievedDataColumnSidecars {
require.Equal(t, dataColumnSidecars[i], dataColumn)
}
// Test with a mix of sidecar types
mixedSidecars := []ROSidecar{
NewSidecarFromDataColumnSidecar(RODataColumn{}),
NewSidecarFromBlobSidecar(ROBlob{}),
}
_, err = DataColumnSidecarsFromSidecars(mixedSidecars)
require.Error(t, err)
}

Some files were not shown because too many files have changed in this diff.