Compare commits

...

219 Commits

Author SHA1 Message Date
Kasey Kirkham
58006e2920 gaz 2026-04-17 10:09:19 -05:00
Kasey Kirkham
8daf06a89e add missing test file 2026-04-16 18:14:28 -05:00
Kasey Kirkham
0f9b44731d Avoid using nil context as a sentinel value (linter says no) 2026-04-16 18:14:28 -05:00
Kasey Kirkham
fb70f08b7b changelog 2026-04-16 18:14:28 -05:00
Kasey Kirkham
a6b76386f6 readability tweak 2026-04-16 18:14:28 -05:00
Kasey Kirkham
0bf098af33 Cancellable event loop plumbing
Force methods interacting with the loop to safely cooperate by using buffered channels
and doing appropriate context checks everywhere. Encapsulate all the interaction points
behind helper methods to simplify calling code.
2026-04-16 18:14:28 -05:00
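
The pattern this commit describes can be shown as a standalone Go sketch. The names below are illustrative, not the repo's: callers talk to the event loop through a helper that uses a buffered reply channel and checks both the caller's context and the loop's context, so neither side can block on the other after shutdown.

package eventloop

import (
    "context"
    "errors"
)

var errLoopStopped = errors.New("event loop stopped")

type request struct {
    resp chan error // buffered so the loop never blocks replying to a departed caller
}

type loop struct {
    ctx  context.Context // cancelled when the loop shuts down
    reqs chan request
}

// submit encapsulates the interaction with the event loop: it enqueues a
// request and waits for the reply, bailing out if either context is done.
func (l *loop) submit(ctx context.Context) error {
    req := request{resp: make(chan error, 1)}
    select {
    case l.reqs <- req:
    case <-ctx.Done():
        return ctx.Err()
    case <-l.ctx.Done():
        return errLoopStopped
    }
    select {
    case err := <-req.resp:
        return err
    case <-ctx.Done():
        return ctx.Err()
    case <-l.ctx.Done():
        return errLoopStopped
    }
}

// run is the event loop; the buffered reply channel means this send can never block.
func (l *loop) run() {
    for {
        select {
        case req := <-l.reqs:
            req.resp <- nil
        case <-l.ctx.Done():
            return
        }
    }
}
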
Kasey Kirkham
e004b578db gaz 2026-04-16 18:13:47 -05:00
Kasey Kirkham
897d01d498 changelog 2026-04-13 14:42:41 -05:00
Kasey Kirkham
c159b6f684 simplify wg.Wait handling 2026-04-13 14:34:44 -05:00
Kasey Kirkham
01066cbadf track elapsed time of failed batches as well 2026-04-13 14:34:35 -05:00
Kasey Kirkham
82a5ff88c6 move some info logs to debug 2026-04-13 14:34:27 -05:00
Kasey Kirkham
154eb5ac9f remove todos 2026-04-13 14:34:19 -05:00
Kasey Kirkham
ea10e60ec1 typo fixes 2026-04-13 14:34:15 -05:00
Kasey Kirkham
70b9495bcc Use values from params at broadcaster creation time not package init time
Setting these package-level variables when the package
is initialized by the go runtime will copy values from the
params structure before it has been initialized in node setup.
2026-04-08 16:55:57 -05:00
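
The bug class this commit fixes is easy to reproduce in a standalone sketch (illustrative names, not the repo's): a package-level variable evaluated at package init copies a config field before node setup has populated it, while reading the field at construction time sees the final value.

package paramsinit

// cfg stands in for the params package; node setup overwrites SubnetCount
// only after all packages have been initialized by the Go runtime.
var cfg = struct{ SubnetCount uint64 }{}

// Broken: evaluated during package initialization, so it snapshots the
// zero value before node setup runs.
var subnetCountAtInit = cfg.SubnetCount

type broadcaster struct {
    subnetCount uint64
}

// Correct: the value is read when the broadcaster is created, after setup.
func newBroadcaster() *broadcaster {
    return &broadcaster{subnetCount: cfg.SubnetCount}
}
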
Kasey Kirkham
aef004633a modify CellsToVerifyFromPartialMessage to not mutate the incoming message 2026-04-08 16:55:30 -05:00
Kasey Kirkham
572a283560 fix bug where log entry was dropped after setting attrs 2026-04-08 16:54:38 -05:00
Kasey Kirkham
591af3aecb update go version component of import path to match develop 2026-04-08 16:53:08 -05:00
Kasey Kirkham
1f13aae729 add failing tests to demonstrate bugs found in review 2026-04-08 16:12:54 -05:00
Aarsh Shah
e8480a86af only send header once per peer per group and fix tests for a green race detector 2026-03-31 20:01:49 +05:30
Aarsh Shah
616617e300 changes as per review 2026-03-31 09:38:54 +05:30
Aarsh Shah
28acdfef9f don't construct partial columns if partial is disabled and fix logging 2026-03-26 17:57:54 +05:30
Aarsh Shah
0678ad3e27 fix log 2026-03-26 17:12:24 +05:30
Aarsh Shah
96b6512ebb clean up logs 2026-03-26 17:10:24 +05:30
Aarsh Shah
f853cabe0f include cells and proofs 2026-03-24 12:01:52 +05:30
Aarsh Shah
aa8b95c33c eager push all cells for block proposer 2026-03-23 20:01:30 +05:30
Aarsh Shah
f9e1ddcd0c de-dup find peers for full and partial columns 2026-03-19 18:42:53 +05:30
Aarsh Shah
476d5b0399 pass root to partial data column construction 2026-03-19 15:42:00 +05:30
Aarsh Shah
d07a0b44be changes as per code review 2026-03-19 15:04:28 +05:30
Aarsh Shah
c6442dd070 reject if groupID is invalid 2026-03-19 15:02:04 +05:30
Aarsh Shah
7932a88f33 type for return value of ComputeCellsAndProofsFromStructured 2026-03-19 12:50:34 +05:30
Aarsh Shah
bb08e7e108 revert changes to verifyKzgCommitmentsInclusionProof 2026-03-19 12:36:31 +05:30
Aarsh Shah
fb991b6ac6 remove ExtendFromVerifiedCells method 2026-03-19 11:58:38 +05:30
Aarsh Shah
c781bdfb85 rename p2p_pubsub_rpc_recv_pub_size_total and p2p_pubsub_rpc_drop_pub_size_total 2026-03-19 11:49:54 +05:30
Aarsh Shah
ab207b5086 refactor extractColumnIndexFromTopic 2026-03-19 11:43:17 +05:30
Aarsh Shah
7ca578c0cb remove TODO 2026-03-19 11:41:06 +05:30
Aarsh Shah
cbdd2e3076 remove whitespace bloating 2026-03-19 11:35:35 +05:30
Aarsh Shah
78115c7bcc remove TODO 2026-03-19 11:31:36 +05:30
Aarsh Shah
57c55055d7 start method for broadcaster 2026-03-19 11:30:01 +05:30
Aarsh Shah
5ccd111168 refactor handle incoming rpc 2026-03-18 15:21:21 +05:30
Aarsh Shah
f5e8119a39 add logging helpers 2026-03-18 13:28:58 +05:30
Aarsh Shah
750f54e70b rename to ErrSidecarParentUnknown 2026-03-18 10:20:30 +05:30
Aarsh Shah
3a081cc362 reverse type cast conversion 2026-03-18 10:17:38 +05:30
Aarsh Shah
64f4c524bf remove typecast that is not needed 2026-03-17 19:38:38 +05:30
Aarsh Shah
7408a4e26e improve cells to send for peer implementation 2026-03-17 19:33:06 +05:30
Aarsh Shah
e04b149bf0 make callbacks methods on the broadcaster 2026-03-17 19:24:28 +05:30
Aarsh Shah
69b02d7352 decode parts metadata correctly 2026-03-17 19:09:44 +05:30
Aarsh Shah
8fd850d68c Revert "change parse to decode"
This reverts commit 3fe9ce5e50.
2026-03-17 18:59:00 +05:30
Aarsh Shah
3fe9ce5e50 change parse to decode 2026-03-17 18:42:21 +05:30
Aarsh Shah
4d106477b8 use interface for broadcaster helpers 2026-03-17 18:07:13 +05:30
Aarsh Shah
00dae3670d make column handling methods an interface 2026-03-17 18:02:21 +05:30
Aarsh Shah
62b6439d37 release semaphore immediately 2026-03-17 17:28:38 +05:30
Aarsh Shah
58834e4ba6 use config var for DataColumnSidecarSubnetCount 2026-03-17 17:27:30 +05:30
Aarsh Shah
9240158d7e some fixes 2026-03-16 17:37:15 +05:30
Marco Munizaga
2f587d713a Update partial message usage 2026-03-12 16:01:43 -07:00
Marco Munizaga
06fbfa5ab8 Update gossipsub dep 2026-03-12 15:11:34 -07:00
Aarsh Shah
2a2d013c9b fix handling of batch 2026-03-12 13:30:42 +05:30
Aarsh Shah
645ff9c143 batch verify kzg cells for partial columns 2026-03-12 13:09:08 +05:30
Aarsh Shah
4b3871b98a improve broadcasting of partial columns 2026-03-12 11:11:38 +05:30
Aarsh Shah
3b06bd7605 better error handling 2026-03-11 16:34:29 +05:30
Aarsh Shah
90630ae336 return if broadcaster is stopped 2026-03-11 16:20:42 +05:30
Aarsh Shah
23921f9a2e last set of improvements 2026-03-11 16:11:54 +05:30
Aarsh Shah
f851d381cb republish for data column sidecars and partial columns as well 2026-03-11 14:13:49 +05:30
Aarsh Shah
c406c5c030 fix conflicts 2026-03-11 12:18:23 +05:30
Kasey Kirkham
5995381adf squash me - logger plumbing fix 2026-03-10 00:13:19 -05:00
Kasey Kirkham
f429aa6c81 lint 2026-03-09 22:56:16 -05:00
Aarsh Shah
8e9c476aa8 (16465) unify full/partial validation pipelines 2026-03-09 22:45:31 -05:00
Aarsh Shah
462ef6265a (16433) test partial column validation, broadcast 2026-03-09 22:45:22 -05:00
Aarsh Shah
a598c26443 (16364) ssz partial column metadata encoding 2026-03-09 22:45:10 -05:00
Aarsh Shah
a122010a2d (16324) process eager partial data column headers 2026-03-09 22:44:51 -05:00
Marco Munizaga
827e9d7708 Partial columns addendum (#16327)
see commits for details. best reviewed commit by commit.
2026-03-09 21:37:35 -05:00
Aarsh Shah
5136619cf7 fix partial columns broadcast (#16321)
Fix the data column broadcast logic for partial and full columns.
2026-03-09 21:37:16 -05:00
Aarsh Shah
db4c5a2793 fix lint 2026-03-09 21:35:58 -05:00
Aarsh Shah
0bd1d4ae84 fix bazel 2026-03-09 21:35:47 -05:00
Aarsh Shah
73ca00c26f fix log files CI error 2026-03-09 21:35:39 -05:00
Aarsh Shah
1390a177ed fix CI 2026-03-09 21:35:32 -05:00
Aarsh Shah
b4244b735f fix CI 2026-03-09 21:35:25 -05:00
Aarsh Shah
07f42d4cbf fix lint 2026-03-09 21:35:12 -05:00
Aarsh Shah
c2c1f0ab19 fix CI 2026-03-09 21:35:06 -05:00
Aarsh Shah
77610ee212 go mod tidy and partial columns 2026-03-09 21:34:58 -05:00
Marco Munizaga
45f97a274c add todos from call
this came from a call with Aarsh and Kasey
2026-03-09 21:34:28 -05:00
Marco Munizaga
75d7074548 Implement gossipsub_mesh_peers metric 2026-03-09 21:34:20 -05:00
Marco Munizaga
df3a214809 Track number of peers in mesh
along with tracking which peers request partial messages
2026-03-09 21:34:07 -05:00
Marco Munizaga
69561c7d43 update go-libp2p-pubsub with new tracer
the new tracer interface provides the peer ID in tracer.RecvRPC.
2026-03-09 21:33:56 -05:00
Marco Munizaga
7659ed2bb2 add todo 2026-03-09 21:33:50 -05:00
Marco Munizaga
66a967e9ba return valid if there are no datacolumns to validate 2026-03-09 21:33:39 -05:00
Marco Munizaga
12f67cb2c5 cache partial data column header by group ID 2026-03-09 21:33:27 -05:00
Marco Munizaga
7aaf47a774 Add partial message metrics 2026-03-09 21:33:17 -05:00
Marco Munizaga
0bfb19e0ea more context around errors 2026-03-09 21:33:09 -05:00
Marco Munizaga
a930ec21ad fix test typo 2026-03-09 21:32:37 -05:00
Marco Munizaga
583deedfbd eagerly push the partial data column header 2026-03-09 21:32:22 -05:00
Marco Munizaga
419aafd7b3 Include the version byte in the group ID 2026-03-09 21:31:43 -05:00
Marco Munizaga
1e485d9e5e add partial data column header 2026-03-09 21:31:31 -05:00
Marco Munizaga
0bba8cac47 Update go-libp2p-pubsub 2026-03-09 21:31:24 -05:00
Marco Munizaga
fb0dd6f927 add todo 2026-03-09 21:31:15 -05:00
Marco Munizaga
3620fe9b7c add partial-data-columns flag 2026-03-09 21:31:06 -05:00
Marco Munizaga
bc6db507e2 beacon-chain/sync: subscribe to partial columns 2026-03-09 21:30:42 -05:00
Marco Munizaga
7d29866789 publish partial columns when proposing a block 2026-03-09 21:30:19 -05:00
Marco Munizaga
40190ffe69 beacon-chain/execution: return partial columns and use getBlobsV3
... if available
2026-03-09 21:27:17 -05:00
Marco Munizaga
16ccdfaa69 core/peerdas: Add PartialColumns helper 2026-03-09 21:25:22 -05:00
Marco Munizaga
d3c0d456b0 core/peerdas: support partial responses 2026-03-09 21:25:08 -05:00
Marco Munizaga
7650dff057 beacon-chain/p2p: own and start PartialColumnBroadcaster 2026-03-09 21:24:58 -05:00
Marco Munizaga
a555eb0245 Add metrics for gossipsub message sizes 2026-03-09 21:24:38 -05:00
Marco Munizaga
67b1e21ffc Implement PartialColumnBroadcaster 2026-03-09 21:24:23 -05:00
Marco Munizaga
aa72fb21bb refactor DataColumn Cell KZG Proof verification 2026-03-09 21:24:12 -05:00
Marco Munizaga
26766b90c3 avoid needless copy 2026-03-09 21:24:04 -05:00
Marco Munizaga
864e97ca30 Add PartialDataColumn type 2026-03-09 21:23:56 -05:00
Marco Munizaga
dadf5fe459 proto: Add PartialDataColumnSidecar 2026-03-09 21:23:42 -05:00
Marco Munizaga
8a5fbf2e90 clone slice in testing util 2026-03-09 21:23:30 -05:00
Marco Munizaga
d624f89ad1 logrusadapter for slog 2026-03-09 21:23:20 -05:00
Marco Munizaga
e751d0bc2b fix multiaddr comparison 2026-03-09 21:23:13 -05:00
Marco Munizaga
f90d31eeac deps: update libp2p deps
for partial message support and simnet support
2026-03-09 21:22:58 -05:00
Aarsh Shah
efe2bce182 check for stop in publish 2026-03-09 14:40:36 +04:00
Aarsh Shah
cdd169197a pick up marco's fixes 2026-03-09 12:28:44 +04:00
Aarsh Shah
609d382cbf remove getBlobsCalled param 2026-03-09 11:53:35 +04:00
Marco Munizaga
01ec77c3c5 don't gitignore the execution directory 2026-03-09 11:28:15 +04:00
Aarsh Shah
361c423d78 revert removal of logrus adaptor 2026-03-09 11:21:57 +04:00
Aarsh Shah
9199377539 fix bazel build 2026-03-05 15:51:45 +04:00
Aarsh Shah
707b6dad06 add docs 2026-03-05 15:48:11 +04:00
Aarsh Shah
4ad1f87429 changes based on self review 2026-03-05 15:31:19 +04:00
Aarsh Shah
04340407a5 Merge remote-tracking branch 'origin/tests/tests-for-partial-broadcaster' into fix/unify-validation-pipeline 2026-03-04 18:22:49 +04:00
Aarsh Shah
41056b0828 Merge remote-tracking branch 'origin/feat/remove-logrus-changes' into tests/tests-for-partial-broadcaster 2026-03-04 18:21:40 +04:00
Aarsh Shah
b1cbb5474b Merge remote-tracking branch 'origin/feat/update-gossipsub-update-new-partial-API' into feat/remove-logrus-changes 2026-03-04 18:20:59 +04:00
Aarsh Shah
bde83a2f3f more logging and bazel fixes 2026-03-04 17:58:49 +04:00
Aarsh Shah
eadfb59015 fixes and tests 2026-03-04 17:08:22 +04:00
Aarsh Shah
43cf25660e bazel fixes and validation pipeline 2026-03-04 13:29:59 +04:00
Aarsh Shah
ecd3a910de fix linting 2026-02-27 11:42:26 +04:00
Aarsh Shah
1f5f3c879a finish tests 2026-02-27 11:33:02 +04:00
Aarsh Shah
7d1a8892dd tests for handling validated cells 2026-02-26 16:30:35 +04:00
Aarsh Shah
c1275ac3de unit tests for the broadcaster 2026-02-26 16:05:24 +04:00
Aarsh Shah
c15ffb4469 Rename metrics for partial cells to conform with ETH beacon metrics (#16369)
This PR renames some metrics for partial cells to conform with the ETH
beacon metrics at https://github.com/ethereum/beacon-metrics/pull/21.
2026-02-23 15:00:21 +04:00
Aarsh Shah
cdfc2ba3cd update correctly 2026-02-23 09:38:48 +04:00
Aarsh Shah
1cd3418dd7 send parts metadata along with header 2026-02-23 09:34:49 +04:00
Aarsh Shah
8b295c6208 Update beacon-chain/p2p/partialdatacolumnbroadcaster/partial.go
Co-authored-by: Marco Munizaga <git@marcopolo.io>
2026-02-23 09:11:15 +04:00
Aarsh Shah
09c123294f remove logrus adaptor from the partial cells PR 2026-02-20 11:31:04 +04:00
Aarsh Shah
6710215816 Merge branch 'feat/process-eager-partial-header' into feat/update-gossipsub-update-new-partial-API 2026-02-20 10:37:24 +04:00
Aarsh Shah
b8e0f8bf79 drop header if no semaphore bound is available to handle it 2026-02-20 10:30:42 +04:00
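
The mechanism named here is the usual Go semaphore-with-drop pattern, sketched below with illustrative names: a buffered channel bounds concurrent header handlers, a non-blocking send acquires a slot, and the header is dropped when every slot is taken so the event loop never stalls.

package headerhandling

// sem bounds the number of goroutines handling headers concurrently.
var sem = make(chan struct{}, 8)

// tryHandleHeader runs process in a goroutine if a semaphore slot is free and
// reports whether the header was accepted; otherwise the header is dropped.
func tryHandleHeader(process func()) bool {
    select {
    case sem <- struct{}{}: // acquire a slot without blocking
    default:
        return false // no slot available: drop the header
    }
    go func() {
        defer func() { <-sem }() // release the slot when done
        process()
    }()
    return true
}
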
Aarsh Shah
7648f2855d send parts metadata correctly 2026-02-19 20:06:58 +04:00
Aarsh Shah
92c24014a2 don't unmarshal message twice 2026-02-19 18:09:42 +04:00
Aarsh Shah
eda07c0198 changes as per review 2026-02-19 17:53:33 +04:00
Aarsh Shah
97a5b50c0b process incoming RPC correctly 2026-02-19 14:48:29 +04:00
Aarsh Shah
76a68759c5 update received state on eager push for idempotency 2026-02-19 12:15:32 +04:00
Aarsh Shah
88834f044f remove excessive checks 2026-02-19 11:48:22 +04:00
Aarsh Shah
115a91d59b let caller format the error for the statekind 2026-02-19 11:37:22 +04:00
Aarsh Shah
fc3d41f90b Apply suggestions from code review
Co-authored-by: Marco Munizaga <git@marcopolo.io>
2026-02-19 11:30:05 +04:00
Aarsh Shah
d2ae2a4fa4 default to nothing available and nothing requested if peer state is empty 2026-02-19 11:29:15 +04:00
Aarsh Shah
7719686a41 root validation changes 2026-02-19 10:41:03 +04:00
Aarsh Shah
57a295f1a5 Avoid string allocation in map keys 2026-02-19 09:25:21 +04:00
Aarsh Shah
a0630aec02 final changes 2026-02-18 15:18:02 +04:00
Aarsh Shah
9c0fc75cd2 Merge branch 'feat/process-eager-partial-header' into feat/update-gossipsub-update-new-partial-API 2026-02-18 13:17:39 +04:00
Aarsh Shah
0f3f3124af final changes 2026-02-18 13:07:34 +04:00
Aarsh Shah
412648ac02 add changelog 2026-02-17 19:48:55 +04:00
Aarsh Shah
fd3587c932 get a green CI 2026-02-17 18:53:23 +04:00
Aarsh Shah
6ab9af51d6 fix bazel 2026-02-17 17:27:54 +04:00
Aarsh Shah
73c5d2648c Tests for partial data column 2026-02-17 17:15:51 +04:00
Aarsh Shah
9b315166de implement OnIncomingRPC 2026-02-17 16:29:00 +04:00
Aarsh Shah
938dda3025 handle gossip for peer 2026-02-17 09:36:21 +04:00
Aarsh Shah
55f5973dad for peer implementation 2026-02-16 20:16:12 +04:00
Aarsh Shah
867978e79b update gossipsub 2026-02-16 16:13:37 +04:00
Aarsh Shah
c8af4b74b5 rename to getBlobsCalled 2026-02-16 15:03:15 +04:00
Aarsh Shah
ab9366ed5f event loop should never block 2026-02-16 14:27:09 +04:00
Aarsh Shah
fd07e59c0a bound number of go-routines to handle header 2026-02-11 12:26:49 +04:00
Aarsh Shah
d66d25e7ad fix lint 2026-02-11 11:49:25 +04:00
Aarsh Shah
89ab513183 don't block the event loop on handling headers 2026-02-11 11:43:20 +04:00
Aarsh Shah
1c6110b6e8 move initialisation of validators and handlers for partial data columns to the Start method 2026-02-11 10:37:06 +04:00
Aarsh Shah
a82edc7fbc fix lint 2026-02-10 12:11:14 +04:00
Aarsh Shah
39bb8d2929 Merge branch 'rebased-partial-columns' into feat/process-eager-partial-header 2026-02-10 12:10:14 +04:00
Marco Munizaga
80e0227bac Partial columns addendum (#16327)
see commits for details. best reviewed commit by commit.
2026-02-10 12:07:27 +04:00
Aarsh Shah
387cfcd442 only complete column if it has been extended 2026-02-10 11:50:11 +04:00
Aarsh Shah
c70e51b445 fix test 2026-02-10 10:12:18 +04:00
Aarsh Shah
a1a8a86341 extend existing cells 2026-02-10 09:41:55 +04:00
Aarsh Shah
71b1610331 publish headers 2026-02-09 20:37:12 +04:00
Aarsh Shah
814abab4b5 wait for header to be processed 2026-02-09 18:47:47 +04:00
Aarsh Shah
d0d6bab8a0 handle header 2026-02-06 19:21:43 +04:00
Aarsh Shah
3f6c01fc3b changes as per review 2026-02-06 18:44:58 +04:00
Aarsh Shah
c5914ea4d9 handle partial data column header 2026-02-04 19:05:03 +04:00
Aarsh Shah
08d143bd2c add type for partial header reconstruction 2026-02-04 18:27:54 +04:00
Aarsh Shah
b36bc1fe17 Merge branch 'develop' into rebased-partial-columns 2026-02-04 10:29:56 +04:00
Aarsh Shah
a329a77037 fix partial columns broadcast (#16321)
Fix the data column broadcast logic for partial and full columns.
2026-02-04 10:25:53 +04:00
Aarsh Shah
bfd9ff8651 fix lint 2026-02-03 18:10:35 +04:00
Aarsh Shah
79301d4db6 fix bazel 2026-02-03 18:03:26 +04:00
Aarsh Shah
fbfea6f753 fix log files CI error 2026-02-03 17:54:41 +04:00
Aarsh Shah
562ef25527 fix CI 2026-02-03 17:47:40 +04:00
Aarsh Shah
488971f989 fix CI 2026-02-03 17:44:33 +04:00
Aarsh Shah
b129eaaeb8 fix lint 2026-02-03 17:01:45 +04:00
Aarsh Shah
90302adbd2 fix CI 2026-02-03 16:17:36 +04:00
Aarsh Shah
a6262ba07b go mod tidy and partial columns 2026-02-03 15:23:59 +04:00
Aarsh Shah
7d5c8d6964 Merge branch 'develop' into rebased-partial-columns 2026-02-03 10:20:18 +04:00
Marco Munizaga
38cae3f8de add todos from call
this came from a call with Aarsh and Kasey
2026-02-02 11:05:40 -06:00
Marco Munizaga
a5e6d0a3ac Implement gossipsub_mesh_peers metric 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4a308e598 Track number of peers in mesh
along with tracking which peers request partial messages
2026-02-02 11:05:40 -06:00
Marco Munizaga
ba720c1b4b update go-libp2p-pubsub with new tracer
the new tracer interface provides the peer ID in tracer.RecvRPC.
2026-02-02 11:05:40 -06:00
Marco Munizaga
5a3f45f91f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
29010edcd1 return valid if there are no datacolumns to validate 2026-02-02 11:05:40 -06:00
Marco Munizaga
045c29ccce cache partial data column header by group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
0d80bbe44f Add partial message metrics 2026-02-02 11:05:40 -06:00
Marco Munizaga
14f13ed902 more context around errors 2026-02-02 11:05:40 -06:00
Marco Munizaga
308199c2e7 fix test typo 2026-02-02 11:05:40 -06:00
Marco Munizaga
7a04c6b645 eagerly push the partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
93cda45e18 Include the version byte in the group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
1b4265ef3f add partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
90d1716fd7 Update go-libp2p-pubsub 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4f1a9ac4f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
73d948a710 add partial-data-columns flag 2026-02-02 11:05:38 -06:00
Marco Munizaga
5e2985d36b beacon-chain/sync: subscribe to partial columns 2026-02-02 11:04:53 -06:00
Marco Munizaga
7ac3f3cb68 publish partial columns when proposing a block 2026-02-02 11:04:53 -06:00
Marco Munizaga
0a759c3d15 beacon-chain/execution: return partial columns and use getBlobsV3
... if available
2026-02-02 11:04:53 -06:00
Marco Munizaga
bf7ca00780 core/peerdas: Add PartialColumns helper 2026-02-02 11:04:52 -06:00
Marco Munizaga
efcb98bcaa core/peerdas: support partial responses 2026-02-02 11:04:52 -06:00
Marco Munizaga
7b0de5ad0e beacon-chain/p2p: own and start PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e15cd2068 Add metrics for gossipsub message sizes 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e5192e496 Implement PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
729c54a300 refactor DataColumn Cell KZG Proof verification 2026-02-02 11:04:52 -06:00
Marco Munizaga
4556aa266a avoid needless copy 2026-02-02 11:04:52 -06:00
Marco Munizaga
2f1cac217d Add PartialDataColumn type 2026-02-02 11:04:52 -06:00
Marco Munizaga
6bfc779ea1 proto: Add PartialDataColumnSidecar 2026-02-02 11:04:52 -06:00
Marco Munizaga
90481d6aa8 clone slice in testing util 2026-02-02 11:04:52 -06:00
Marco Munizaga
9f64007dc1 logrusadapter for slog 2026-02-02 11:04:52 -06:00
Marco Munizaga
879ea624ec fix multiaddr comparison 2026-02-02 11:04:52 -06:00
Marco Munizaga
79841f451c deps: update libp2p deps
for partial message support and simnet support
2026-02-02 11:04:52 -06:00
88 changed files with 8216 additions and 493 deletions

View File

@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
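
The replacement above relies on a Go conversion rule: two defined types with the same underlying array type can be converted directly, which copies the whole array value in one assignment and makes the explicit copy call unnecessary. A standalone sketch with illustrative sizes:

package cellcast

const bytesPerCell = 4 // illustrative; the real cell size is much larger

// Two distinct named types sharing the same underlying array type.
type cell [bytesPerCell]byte
type ckzgCell [bytesPerCell]byte

func convert(src cell) ckzgCell {
    // Old style: element-wise copy into a destination variable.
    var viaCopy ckzgCell
    copy(viaCopy[:], src[:])

    // New style: a direct conversion copies the array value in one step.
    viaCast := ckzgCell(src)

    _ = viaCopy
    return viaCast
}
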

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -1,6 +1,9 @@
package peerdas
import (
stderrors "errors"
"iter"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -16,6 +19,7 @@ var (
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
ErrNoKzgCommitments = errors.New("no KZG commitments found")
ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs")
ErrEmptySegment = errors.New("empty segment in batch")
ErrInvalidKZGProof = errors.New("invalid KZG proof")
ErrBadRootLength = errors.New("bad root length")
ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -57,65 +61,104 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
// CellProofBundleSegment is returned when a batch fails. The caller can call
// the `.Verify` method to verify just this segment.
type CellProofBundleSegment struct {
indices []uint64
commitments []kzg.Bytes48
cells []kzg.Cell
proofs []kzg.Bytes48
}
// Verify verifies this segment without batching.
func (s CellProofBundleSegment) Verify() error {
if len(s.cells) == 0 {
return ErrEmptySegment
}
verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
if err != nil {
return stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
}
return nil
}
func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
// ignore the failed segment list since we are just passing in one segment.
_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
return err
}
// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
// Note: We are slightly deviating from the specification here:
// The specification verifies the KZG proofs for each sidecar separately,
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// efficient when verifying in batch. If the batch fails, the failed segments
// are returned to the caller so that they may try segment by segment without
// batching. On success the failed segment list is empty.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
commitments := make([]kzg.Bytes48, 0, sizeHint)
indices := make([]uint64, 0, sizeHint)
cells := make([]kzg.Cell, 0, sizeHint)
proofs := make([]kzg.Bytes48, 0, sizeHint)
commitments := make([]kzg.Bytes48, 0, count)
indices := make([]uint64, 0, count)
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var anySegmentEmpty bool
var segments []CellProofBundleSegment
for _, cellProofsIter := range cellProofsIters {
startIdx := len(cells)
for bundle := range cellProofsIter {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
len(proofBytes) != len(proof) {
return ErrMismatchLength
if len(bundle.Commitment) != len(commitment) ||
len(bundle.Cell) != len(cell) ||
len(bundle.Proof) != len(proof) {
return nil, ErrMismatchLength
}
copy(commitment[:], commitmentBytes)
copy(cell[:], cellBytes)
copy(proof[:], proofBytes)
copy(commitment[:], bundle.Commitment)
copy(cell[:], bundle.Cell)
copy(proof[:], bundle.Proof)
commitments = append(commitments, commitment)
indices = append(indices, sidecar.Index)
indices = append(indices, bundle.ColumnIndex)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
if len(cells[startIdx:]) == 0 {
anySegmentEmpty = true
}
segments = append(segments, CellProofBundleSegment{
indices: indices[startIdx:],
commitments: commitments[startIdx:],
cells: cells[startIdx:],
proofs: proofs[startIdx:],
})
}
if anySegmentEmpty {
return segments, ErrEmptySegment
}
// Batch verify that the cells match the corresponding commitments and proofs.
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
if err != nil {
return errors.Wrap(err, "verify cell KZG proof batch")
return segments, stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
return segments, ErrInvalidKZGProof
}
return nil
return nil, nil
}
// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments included in the given beacon block.
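
A sketch of the fallback flow the new API enables, assuming the signatures in this hunk and the RODataColumnsToCellProofBundles helper used in the test changes further down (the consensus-types/blocks import path is an assumption about the repo layout): batch-verify everything first, and only retry each sidecar's segment on its own if the batch fails.

package verifyfallback

import (
    "iter"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// verifySidecars batch-verifies all cell KZG proofs across the sidecars and,
// when the batch fails, retries segment by segment so one bad sidecar does
// not poison the whole batch. It returns the indices of sidecars that fail
// even in isolation.
func verifySidecars(sidecars []blocks.RODataColumn) []int {
    // One segment (an iterator of cell/proof bundles) per sidecar.
    segments := make([]iter.Seq[blocks.CellProofBundle], 0, len(sidecars))
    for i := range sidecars {
        segments = append(segments, blocks.RODataColumnsToCellProofBundles(sidecars[i:i+1]))
    }

    failed, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(0, segments)
    if err == nil {
        return nil // the whole batch verified
    }

    var bad []int
    for i, segment := range failed {
        if segment.Verify() != nil {
            bad = append(bad, i)
        }
    }
    return bad
}
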

View File

@@ -3,6 +3,7 @@ package peerdas_test
import (
"crypto/rand"
"fmt"
"iter"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -72,7 +73,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
@@ -80,14 +81,15 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})
t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
require.NoError(t, err)
require.Equal(t, 0, len(failedSegments))
})
}
@@ -273,7 +275,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -308,7 +310,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -341,7 +343,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for _, sidecars := range allSidecars {
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}

View File

@@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -338,8 +339,16 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return cellsPerBlob, proofsPerBlob, nil
}
// StructuredCellsAndProofs packages the results of computing cells and proofs from structured blobs.
type StructuredCellsAndProofs struct {
Included bitfield.Bitlist
CellsPerBlob [][]kzg.Cell
ProofsPerBlob [][]kzg.Proof
}
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
// commitmentCount is required to return the correct sized bitlist even if we see a nil slice of blobsAndProofs.
func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (StructuredCellsAndProofs, error) {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -347,14 +356,24 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
var wg errgroup.Group
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
var blobsPresent int
for _, blobAndProof := range blobsAndProofs {
if blobAndProof != nil {
blobsPresent++
}
}
cellsPerBlob := make([][]kzg.Cell, blobsPresent)
proofsPerBlob := make([][]kzg.Proof, blobsPresent)
included := bitfield.NewBitlist(commitmentCount)
var j int
for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
continue
}
included.SetBitAt(uint64(i), true)
compactIndex := j
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
@@ -381,17 +400,22 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
kzgProofs = append(kzgProofs, kzgProof)
}
cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
cellsPerBlob[compactIndex] = cells
proofsPerBlob[compactIndex] = kzgProofs
return nil
})
j++
}
if err := wg.Wait(); err != nil {
return nil, nil, err
return StructuredCellsAndProofs{}, err
}
return cellsPerBlob, proofsPerBlob, nil
return StructuredCellsAndProofs{
Included: included,
CellsPerBlob: cellsPerBlob,
ProofsPerBlob: proofsPerBlob,
}, nil
}
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
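
Because the returned CellsPerBlob and ProofsPerBlob are compacted to only the blobs the execution client actually returned, callers index them through the Included bitlist. A sketch of that mapping, assuming the types in this hunk (import paths follow the repo layout shown elsewhere in the diff):

package cellsindex

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
)

// cellsByCommitmentIndex expands the compact result back into a slice indexed
// by commitment position, leaving nil entries for blobs that were not returned.
func cellsByCommitmentIndex(commitmentCount uint64, cp peerdas.StructuredCellsAndProofs) [][]kzg.Cell {
    expanded := make([][]kzg.Cell, commitmentCount)
    compact := 0
    for i := uint64(0); i < commitmentCount; i++ {
        if !cp.Included.BitAt(i) {
            continue // this blob was missing from the getBlobs response
        }
        expanded[i] = cp.CellsPerBlob[compact]
        compact++
    }
    return expanded
}
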

View File

@@ -479,8 +479,9 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
result, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
require.NoError(t, err)
require.Equal(t, uint64(0), result.Included.Count())
})
t.Run("nominal", func(t *testing.T) {
@@ -533,24 +534,25 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
require.NoError(t, err)
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
result, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
require.Equal(t, result.Included.Count(), uint64(len(result.CellsPerBlob)))
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))
require.Equal(t, blobCount, len(result.CellsPerBlob))
// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), cap(actualProofsPerBlob[i]))
require.Equal(t, len(expectedCellsPerBlob[i]), len(result.CellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(result.ProofsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), cap(result.ProofsPerBlob[i]))
// Compare cells
for j, expectedCell := range expectedCellsPerBlob[i] {
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
require.Equal(t, expectedCell, result.CellsPerBlob[i][j])
}
// Compare proofs
for j, expectedProof := range expectedProofsPerBlob[i] {
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
require.Equal(t, expectedProof, result.ProofsPerBlob[i][j])
}
}
})

View File

@@ -3,6 +3,7 @@ package peerdas
import (
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -23,11 +24,13 @@ var (
var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
_ ConstructionPopulator = (*PartialDataColumnHeaderReconstructionSource)(nil)
)
const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
PartialDataColumnHeaderType = "PartialDataColumnHeader"
)
type (
@@ -54,6 +57,11 @@ type (
blocks.VerifiedRODataColumn
}
PartialDataColumnHeaderReconstructionSource struct {
*ethpb.PartialDataColumnHeader
root [fieldparams.RootLength]byte
}
blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
@@ -71,6 +79,19 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
// PopulateFromPartialHeader creates a PartialDataColumnHeaderReconstructionSource from a partial header.
// It eagerly computes and validates the hash tree root of the block header.
func PopulateFromPartialHeader(header *ethpb.PartialDataColumnHeader) (*PartialDataColumnHeaderReconstructionSource, error) {
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return nil, errors.New("nil signed block header or header")
}
root, err := header.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "hash tree root")
}
return &PartialDataColumnHeaderReconstructionSource{PartialDataColumnHeader: header, root: root}, nil
}
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(st beaconState.ReadOnlyBalances, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -142,6 +163,41 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return roSidecars, nil
}
func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator,
opts ...blocks.PartialDataColumnOption) ([]blocks.PartialDataColumn, error) {
start := time.Now()
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
dc, err := blocks.NewPartialDataColumn(src.Root(), info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof, opts...)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
for i := range len(info.kzgCommitments) {
if !included.BitAt(uint64(i)) {
continue
}
dc.ExtendFromVerifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
cells[idx] = cells[idx][1:]
proofs[idx] = proofs[idx][1:]
}
dataColumns = append(dataColumns, dc)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return dataColumns, nil
}
// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
@@ -253,3 +309,39 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
return info, nil
}
// Slot returns the slot from the partial data column header
func (p *PartialDataColumnHeaderReconstructionSource) Slot() primitives.Slot {
return p.SignedBlockHeader.Header.Slot
}
// Root returns the block root computed from the header
func (p *PartialDataColumnHeaderReconstructionSource) Root() [fieldparams.RootLength]byte {
return p.root
}
// ProposerIndex returns the proposer index from the header
func (p *PartialDataColumnHeaderReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return p.SignedBlockHeader.Header.ProposerIndex
}
// Commitments returns the KZG commitments from the header
func (p *PartialDataColumnHeaderReconstructionSource) Commitments() ([][]byte, error) {
return p.KzgCommitments, nil
}
// Type returns the type of the source
func (p *PartialDataColumnHeaderReconstructionSource) Type() string {
return PartialDataColumnHeaderType
}
// extract extracts the block information from the partial header
func (p *PartialDataColumnHeaderReconstructionSource) extract() (*blockInfo, error) {
info := &blockInfo{
signedBlockHeader: p.SignedBlockHeader,
kzgCommitments: p.KzgCommitments,
kzgInclusionProof: p.KzgCommitmentsInclusionProof,
}
return info, nil
}
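
Putting the new source type to work: a sketch of building partial columns from a gossiped header, assuming the PopulateFromPartialHeader and PartialColumns signatures in this hunk (the ethpb and blocks import paths are assumptions about the repo layout):

package partialfromheader

import (
    "github.com/OffchainLabs/go-bitfield"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"

    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// partialColumnsFromHeader validates the header, computes the block root, and
// builds one PartialDataColumn per column index, extending each column only
// with the cells whose blobs are marked as included.
func partialColumnsFromHeader(
    header *ethpb.PartialDataColumnHeader,
    included bitfield.Bitlist,
    cellsPerBlob [][]kzg.Cell,
    proofsPerBlob [][]kzg.Proof,
) ([]blocks.PartialDataColumn, error) {
    src, err := peerdas.PopulateFromPartialHeader(header)
    if err != nil {
        return nil, err
    }
    return peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, src)
}
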

View File

@@ -267,4 +267,32 @@ func TestReconstructionSource(t *testing.T) {
require.Equal(t, peerdas.SidecarType, src.Type())
})
t.Run("from partial header", func(t *testing.T) {
referenceSidecar := sidecars[0]
partialHeader := &ethpb.PartialDataColumnHeader{
SignedBlockHeader: referenceSidecar.SignedBlockHeader,
KzgCommitments: referenceSidecar.KzgCommitments,
KzgCommitmentsInclusionProof: referenceSidecar.KzgCommitmentsInclusionProof,
}
src, err := peerdas.PopulateFromPartialHeader(partialHeader)
require.NoError(t, err)
require.Equal(t, referenceSidecar.SignedBlockHeader.Header.Slot, src.Slot())
// Compute expected root
expectedRoot, err := referenceSidecar.SignedBlockHeader.Header.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, expectedRoot, src.Root())
require.Equal(t, referenceSidecar.SignedBlockHeader.Header.ProposerIndex, src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, commitment1, commitments[0])
require.DeepEqual(t, commitment2, commitments[1])
require.Equal(t, peerdas.PartialDataColumnHeaderType, src.Type())
})
}

View File

@@ -27,7 +27,6 @@ go_library(
],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"time"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
@@ -59,6 +58,7 @@ var (
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
GetBlobsV3,
}
)
@@ -112,6 +112,8 @@ const (
GetBlobsV2 = "engine_getBlobsV2"
// GetClientVersionV1 is the JSON-RPC method that identifies the execution client.
GetClientVersionV1 = "engine_getClientVersionV1"
// GetBlobsV3 request string for JSON-RPC.
GetBlobsV3 = "engine_getBlobsV3"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -141,7 +143,7 @@ type Reconstructor interface {
ctx context.Context, blockHashes [][32]byte,
) (map[[32]byte]*pb.ExecutionPayloadDeneb, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
ReconstructExecutionPayloadEnvelope(ctx context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope) (*ethpb.SignedExecutionPayloadEnvelope, error)
}
@@ -625,6 +627,22 @@ func (s *Service) GetClientVersionV1(ctx context.Context) ([]*structs.ClientVers
return result, nil
}
func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
defer span.End()
start := time.Now()
if !s.capabilityCache.has(GetBlobsV3) {
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV3))
}
getBlobsV3RequestsTotal.Inc()
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
getBlobsV3Latency.Observe(time.Since(start).Seconds())
return result, handleRPCError(err)
}
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -915,40 +933,50 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
root := populator.Root()
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
commitments, err := populator.Commitments()
if err != nil {
return nil, wrapWithBlockRoot(err, root, "commitments")
return nil, nil, wrapWithBlockRoot(err, root, "commitments")
}
cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
cp, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
if err != nil {
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}
log.Debug("Received cells and proofs from execution client", "included", cp.Included, "cells count", len(cp.CellsPerBlob), "err", err)
var partialColumns []blocks.PartialDataColumn
if s.partialColumnsSupported {
partialColumns, err = peerdas.PartialColumns(cp.Included, cp.CellsPerBlob, cp.ProofsPerBlob, populator)
if err != nil {
return nil, nil, wrapWithBlockRoot(err, root, "construct partial columns")
}
}
// Return early if nothing is returned from the EL.
if len(cellsPerBlob) == 0 {
return nil, nil
haveAllBlobs := cp.Included.Count() == uint64(len(commitments))
log.Debug("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
if haveAllBlobs {
// Construct data column sidecars from the signed block and cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cp.CellsPerBlob, cp.ProofsPerBlob, populator)
if err != nil {
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, partialColumns, nil
}
// Construct data column sidears from the signed block and cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, nil
return nil, partialColumns, nil
}
// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (peerdas.StructuredCellsAndProofs, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -956,24 +984,40 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
versionedHashes = append(versionedHashes, versionedHash)
}
var blobAndProofs []*pb.BlobAndProofV2
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, nil, errors.Wrapf(err, "get blobs V2")
var err error
useV3 := s.useV3()
if useV3 {
// v3 can return a partial response. V2 is all or nothing
blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
} else {
blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
}
// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
return nil, nil, nil
if err != nil {
return peerdas.StructuredCellsAndProofs{}, errors.Wrapf(err, "get blobs V2/3")
}
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
result, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
return peerdas.StructuredCellsAndProofs{}, errors.Wrap(err, "compute cells and proofs")
}
if useV3 {
if result.Included.Count() == uint64(len(kzgCommitments)) {
getBlobsV3CompleteResponsesTotal.Inc()
} else if result.Included.Count() > 0 {
getBlobsV3PartialResponsesTotal.Inc()
}
}
return cellsPerBlob, proofsPerBlob, nil
return result, nil
}
func (s *Service) useV3() bool {
return s.capabilityCache.has(GetBlobsV3) && s.partialColumnsSupported
}
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.

View File

@@ -2681,7 +2681,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
_, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})
@@ -2692,7 +2692,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2705,7 +2705,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

View File

@@ -34,6 +34,25 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_requests_total",
Help: "Total number of engine_getBlobsV3 requests sent",
})
getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_complete_responses_total",
Help: "Total number of complete engine_getBlobsV3 successful responses received",
})
getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_partial_responses_total",
Help: "Total number of engine_getBlobsV3 partial responses received",
})
getBlobsV3Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_engine_getBlobsV3_request_duration_seconds",
Help: "Duration of engine_getBlobsV3 requests in seconds",
Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",

View File

@@ -22,6 +22,13 @@ func WithHttpEndpoint(endpointString string) Option {
}
}
func WithPartialColumnsSupported() Option {
return func(s *Service) error {
s.partialColumnsSupported = true
return nil
}
}
// WithHttpEndpointAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
return func(s *Service) error {

View File

@@ -84,6 +84,9 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
errorLogger(err, "Could not exchange capabilities with execution client")
}
s.capabilityCache.save(c)
if !s.capabilityCache.has(GetBlobsV3) && s.partialColumnsSupported {
log.Warn("Execution client does not support blobs v3, but partial data columns are enabled")
}
return
case <-s.ctx.Done():

View File

@@ -140,6 +140,7 @@ type config struct {
// Validator Registration Contract on the eth1 chain to kick off the beacon
// chain's validator registration process.
type Service struct {
partialColumnsSupported bool
connectedETH1 bool
isRunning bool
depositRequestsStarted bool

View File

@@ -168,8 +168,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
}
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
}
// ReconstructExecutionPayloadEnvelope --

View File

@@ -681,6 +681,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
DB: b.db,
StateGen: b.stateGen,
ClockWaiter: b.ClockWaiter,
PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
})
if err != nil {
return err
@@ -805,6 +806,10 @@ func (b *BeaconNode) registerPOWChainService() error {
execution.WithVerifierWaiter(b.verifyInitWaiter),
execution.WithGraffitiInfo(graffitiInfo),
)
if b.cliCtx.Bool(flags.PartialDataColumns.Name) {
opts = append(opts, execution.WithPartialColumnsSupported())
}
web3Service, err := execution.NewService(b.ctx, opts...)
if err != nil {
return errors.Wrap(err, "could not register proof-of-work chain web3Service")
@@ -1015,6 +1020,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,
GraffitiInfo: web3Service.GraffitiInfo(),
BlockProposalEagerPushCells: b.cliCtx.Bool(flags.BlockProposalEagerPushCells.Name),
})
return b.services.RegisterService(rpcService)

View File

@@ -52,6 +52,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",

View File

@@ -2,11 +2,13 @@ package p2p
import (
"bytes"
"cmp"
"context"
"fmt"
"reflect"
"slices"
"sync"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
@@ -343,7 +345,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
// cancelled (whichever comes first).
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
@@ -353,16 +355,24 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
return errors.Wrap(err, "current fork digest")
}
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
return nil
}
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network.
// For sidecars with available peers, it uses batch publishing.
// For sidecars without peers, it finds peers first and then publishes individually.
// Both paths run in parallel. It returns when all broadcasts are complete, or the context is cancelled.
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
type columnBroadcastItem struct {
fullSidecar *blocks.VerifiedRODataColumn
partialColumn *blocks.PartialDataColumn
index uint64
topic string
wrappedSubIdx uint64
subnet uint64
}
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
type rootAndIndex struct {
root [fieldparams.RootLength]byte
index uint64
@@ -372,105 +382,183 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
logLevel := logrus.GetLevel()
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
topic = dataColumnSubnetToTopic(subnet, forkDigest)
wrappedSubIdx = subnet + dataColumnSubnetVal
return
// Build combined items by column index, merging full sidecars and partial columns.
itemsByIndex := make(map[uint64]*columnBroadcastItem)
for i := range sidecars {
sc := &sidecars[i]
slotPerRoot[sc.BlockRoot()] = sc.Slot()
topic, wrappedSubIdx, subnet := columnToTopic(sc.Index, forkDigest)
item, ok := itemsByIndex[sc.Index]
if !ok {
item = &columnBroadcastItem{
index: sc.Index,
topic: topic,
wrappedSubIdx: wrappedSubIdx,
subnet: subnet,
}
itemsByIndex[sc.Index] = item
}
item.fullSidecar = sc
}
sidecarsWithPeers := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
var sidecarsWithoutPeers []blocks.VerifiedRODataColumn
if s.partialColumnBroadcaster != nil {
for i := range partialColumns {
pc := &partialColumns[i]
topic, wrappedSubIdx, subnet := columnToTopic(pc.Index, forkDigest)
item, ok := itemsByIndex[pc.Index]
if !ok {
item = &columnBroadcastItem{
index: pc.Index,
topic: topic,
wrappedSubIdx: wrappedSubIdx,
subnet: subnet,
}
itemsByIndex[pc.Index] = item
}
item.partialColumn = pc
}
}
// Categorize sidecars by peer availability.
for _, sidecar := range sidecars {
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
// Sort ascending by column index so that the first 64 columns
// (which hold the raw field elements) are prioritized.
items := make([]*columnBroadcastItem, 0, len(itemsByIndex))
for _, item := range itemsByIndex {
items = append(items, item)
}
slices.SortFunc(items, func(a, b *columnBroadcastItem) int {
return cmp.Compare(a.index, b.index)
})
topic, wrappedSubIdx, _ := topicFunc(sidecar)
// Check if we have a peer for this subnet (use RLock for read-only check).
mu := s.subnetLocker(wrappedSubIdx)
// Categorize items by peer availability.
var itemsWithPeers []*columnBroadcastItem
var itemsWithoutPeers []*columnBroadcastItem
for _, item := range items {
mu := s.subnetLocker(item.wrappedSubIdx)
mu.RLock()
hasPeer := s.hasPeerWithSubnet(topic)
hasPeer := s.hasPeerWithSubnet(item.topic)
mu.RUnlock()
if hasPeer {
sidecarsWithPeers = append(sidecarsWithPeers, sidecar)
continue
itemsWithPeers = append(itemsWithPeers, item)
} else {
itemsWithoutPeers = append(itemsWithoutPeers, item)
}
sidecarsWithoutPeers = append(sidecarsWithoutPeers, sidecar)
}
var batchWg, individualWg sync.WaitGroup
// Batch publish sidecars that already have peers
// Batch publish full sidecars that already have peers.
var messageBatch pubsub.MessageBatch
for _, sidecar := range sidecarsWithPeers {
var fullSidecarsBatched atomic.Int64
for _, item := range itemsWithPeers {
if item.fullSidecar == nil {
continue
}
batchWg.Go(func() {
_, span := trace.StartSpan(ctx, "p2p.broadcastDataColumnSidecars")
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, _, _ := topicFunc(sidecar)
if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
if err := s.batchObject(ctx, &messageBatch, *item.fullSidecar, item.topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot batch data column sidecar")
return
}
fullSidecarsBatched.Add(1)
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
root := item.fullSidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: item.index}, time.Now())
}
})
}
// For sidecars without peers, find peers and publish individually (no batching).
for _, sidecar := range sidecarsWithoutPeers {
batchDone := make(chan struct{})
go func() {
// Wait for batch to be populated, then publish.
batchWg.Wait()
if fullSidecarsBatched.Load() > 0 {
if err := s.pubsub.PublishBatch(&messageBatch); err != nil {
log.WithError(err).Error("Cannot publish batch for data column sidecars")
} else {
dataColumnSidecarBroadcasts.Add(float64(fullSidecarsBatched.Load()))
}
}
close(batchDone)
}()
// Publish partial columns that already have peers.
if s.partialColumnBroadcaster != nil {
var partialsWithPeers atomic.Int64
iterFunc := func(yield func(string, blocks.PartialDataColumn) bool) {
for _, item := range itemsWithPeers {
if item.partialColumn == nil {
continue
}
partialsWithPeers.Add(1)
fullTopicStr := item.topic + s.Encoding().ProtocolSuffix()
if !yield(fullTopicStr, *item.partialColumn) {
return
}
}
}
if err := s.partialColumnBroadcaster.Publish(ctx, iterFunc); err != nil {
log.WithError(err).Error("Cannot publish partial data columns")
} else {
partialDataColumnBroadcasts.Add(float64(partialsWithPeers.Load()))
}
}
// For items without peers, find peers and publish individually.
// One goroutine per item performs a single findPeersIfNeeded call
// that covers both the full sidecar and partial column for that subnet.
for _, item := range itemsWithoutPeers {
individualWg.Go(func() {
_, span := trace.StartSpan(ctx, "p2p.broadcastDataColumnSidecars")
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, wrappedSubIdx, subnet := topicFunc(sidecar)
// Find peers for this sidecar's subnet.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
if err := s.findPeersIfNeeded(ctx, item.wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, item.subnet); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot find peers if needed")
return
}
// Publish individually (not batched) since we just found peers.
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot broadcast data column sidecar")
return
if item.fullSidecar != nil {
if err := s.broadcastObject(ctx, *item.fullSidecar, item.topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot broadcast data column sidecar")
} else {
dataColumnSidecarBroadcasts.Inc()
if logLevel >= logrus.DebugLevel {
root := item.fullSidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: item.index}, time.Now())
}
}
}
dataColumnSidecarBroadcasts.Inc()
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
if item.partialColumn != nil && s.partialColumnBroadcaster != nil {
pc := *item.partialColumn
fullTopicStr := item.topic + s.Encoding().ProtocolSuffix()
if err := s.partialColumnBroadcaster.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
yield(fullTopicStr, pc)
}); err != nil {
log.WithError(err).Error("Cannot publish partial data column")
} else {
partialDataColumnBroadcasts.Inc()
}
}
})
}
// Wait for batch to be populated, then publish.
batchWg.Wait()
if len(sidecarsWithPeers) > 0 {
if err := s.pubsub.PublishBatch(&messageBatch); err != nil {
log.WithError(err).Error("Cannot publish batch for data column sidecars")
} else {
dataColumnSidecarBroadcasts.Add(float64(len(sidecarsWithPeers)))
}
}
// Wait for all individual publishes to complete.
individualWg.Wait()
<-batchDone
// The rest of this function is only for debug logging purposes.
if logLevel < logrus.DebugLevel {
return
@@ -536,6 +624,13 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
}
}
func columnToTopic(dcIndex uint64, forkDigest [fieldparams.VersionLength]byte) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(dcIndex)
topic = dataColumnSubnetToTopic(subnet, forkDigest)
wrappedSubIdx = subnet + dataColumnSubnetVal
return
}
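For orientation, the sketch below illustrates the index-to-subnet-to-topic mapping this helper performs, under the assumption that the subnet is the column index modulo the subnet count (the count and the topic format string here are illustrative stand-ins, not taken from this diff):

package main

import "fmt"

// Illustrative stand-in for peerdas.ComputeSubnetForDataColumnSidecar,
// assuming subnet = columnIndex % subnetCount.
const dataColumnSidecarSubnetCount = 128 // assumed value for illustration

func computeSubnet(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

// topicFor mirrors the shape of dataColumnSubnetToTopic plus the encoding
// protocol suffix; the format string is a simplified stand-in.
func topicFor(columnIndex uint64, forkDigest [4]byte) string {
	subnet := computeSubnet(columnIndex)
	return fmt.Sprintf("/eth2/%x/data_column_sidecar_%d/ssz_snappy", forkDigest, subnet)
}

func main() {
	fmt.Println(topicFor(12, [4]byte{0, 0, 0, 0}))
	// Example output: /eth2/00000000/data_column_sidecar_12/ssz_snappy
}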
func (s *Service) findPeersIfNeeded(
ctx context.Context,
wrappedSubIdx uint64,

View File

@@ -803,7 +803,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
require.NoError(t, err)
// Receive the message.
@@ -969,7 +969,7 @@ func TestService_BroadcastDataColumnRoundRobin(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// Broadcast all sidecars.
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars)
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars, nil)
require.NoError(t, err)
// Give some time for messages to be sent.
time.Sleep(100 * time.Millisecond)

View File

@@ -26,6 +26,7 @@ const (
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
PartialDataColumns bool
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -28,6 +29,7 @@ type (
Broadcaster
SetStreamHandler
PubSubProvider
PartialColumnBroadcasterProvider
PubSubTopicUser
SenderEncoder
PeerManager
@@ -52,7 +54,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -92,6 +94,11 @@ type (
PubSub() *pubsub.PubSub
}
// PubSubProvider provides the p2p pubsub protocol.
PartialColumnBroadcasterProvider interface {
PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
}
// PeerManager abstracts some peer management methods from libp2p.
PeerManager interface {
Disconnect(peer.ID) error

View File

@@ -97,6 +97,11 @@ var (
Help: "The number of data column sidecar message broadcast attempts.",
})
// Partial Data Column Metrics
partialDataColumnBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_partial_data_column_broadcasts",
Help: "The number of partial data column messages that were broadcasted.",
})
// Gossip Tracer Metrics
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_pubsub_topic_active",
@@ -157,6 +162,11 @@ var (
Help: "The number of publish messages received via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_recv_pub_bytes_total",
Help: "The total size in bytes of publish messages received via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_total",
Help: "The number of messages dropped via rpc for a particular control message",
@@ -171,6 +181,11 @@ var (
Help: "The number of publish messages dropped via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_pub_bytes_total",
Help: "The total size in bytes of publish messages dropped via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_sent_total",
Help: "The number of messages sent via rpc for a particular control message",
@@ -185,6 +200,16 @@ var (
Help: "The number of publish messages sent via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "gossipsub_topic_msg_sent_bytes",
Help: "The total size of publish messages sent via rpc for a particular topic",
},
[]string{"topic", "partial"})
pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "gossipsub_mesh_peer_counts",
Help: "The number of capable peers in mesh",
},
[]string{"topic", "supports_partial"})
)
func (s *Service) updateMetrics() {

View File

@@ -0,0 +1,53 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"metrics.go",
"partial.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//internal/logrusadapter:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"config_test.go",
"gossip_logger_test.go",
"partial_test.go",
"publish_blocking_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/verification:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,41 @@
package partialdatacolumnbroadcaster
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// TestNewBroadcasterRespectsConfigOverride verifies that NewBroadcaster picks
// up the current BeaconConfig().DataColumnSidecarSubnetCount, not a stale
// value captured at package init time.
//
// An earlier version used package-level vars:
//
// var maxConcurrentValidators = params.BeaconConfig().DataColumnSidecarSubnetCount
//
// which froze the value at package init. If a test (or a future config change)
// modified BeaconConfig before creating a broadcaster, the semaphore size
// would not reflect it. NewBroadcaster now reads the config at creation time;
// this test guards against regressing to init-time capture.
func TestNewBroadcasterRespectsConfigOverride(t *testing.T) {
// Save and restore the original config.
origConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(origConfig)
// Override DataColumnSidecarSubnetCount to a distinctive value.
cfg := params.BeaconConfig().Copy()
cfg.DataColumnSidecarSubnetCount = 42
params.OverrideBeaconConfig(cfg)
b := NewBroadcaster(nil)
// The semaphore capacity should match the overridden config value.
gotValidatorCap := cap(b.concurrentValidatorSemaphore)
gotHeaderCap := cap(b.concurrentHeaderHandlerSemaphore)
require.Equal(t, 42, gotValidatorCap,
"concurrentValidatorSemaphore should use current config, not init-time value")
require.Equal(t, 42, gotHeaderCap,
"concurrentHeaderHandlerSemaphore should use current config, not init-time value")
}
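A minimal standalone sketch of the difference this test guards against; the type and function names below are illustrative, not the broadcaster's actual fields:

package main

import "fmt"

// config is a stand-in for params.BeaconConfig().
type config struct{ SubnetCount int }

var current = &config{SubnetCount: 128}

// Captured at package init: frozen before any override can happen.
var initTimeCount = current.SubnetCount

// newThing reads the config at creation time, so later overrides are respected.
func newThing() chan struct{} {
	return make(chan struct{}, current.SubnetCount)
}

func main() {
	current = &config{SubnetCount: 42} // simulate params.OverrideBeaconConfig
	fmt.Println(initTimeCount)         // 128: stale init-time capture
	fmt.Println(cap(newThing()))       // 42: creation-time read
}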

View File

@@ -0,0 +1,91 @@
package partialdatacolumnbroadcaster
import (
"bytes"
"strings"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/testing/require"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// TestGossipUsesInjectedLogger verifies that the gossip() method logs through
// the broadcaster's injected logger (p.logger), not the package-level `log`
// variable. It triggers an error inside gossip's publishPartialCol to produce
// a log line, then asserts that line appears in the injected logger's buffer.
func TestGossipUsesInjectedLogger(t *testing.T) {
var buf bytes.Buffer
injectedLogger := &logrus.Logger{
Out: &buf,
Formatter: &logrus.TextFormatter{DisableTimestamp: true},
Level: logrus.DebugLevel,
}
topic := "/eth2/00000000/data_column_sidecar_7/ssz_snappy"
groupID := []byte{0, 1, 2, 3}
b := NewBroadcaster(injectedLogger)
// Wire up a publishPartialCol that always errors so gossip() logs the failure.
b.publishPartialCol = func(t string, g []byte, c *blocks.PartialDataColumn) error {
return errors.New("publish failed")
}
b.peerFeedback = func(_ string, _ peer.ID, _ pubsub.PeerFeedbackKind) error { return nil }
// Create a partial column in the store so gossip() has something to publish.
col := createPartialColumn(t, 2, map[uint64][]byte{0: {0xAA}})
col.Published = true
verifier := &verification.PartialColumnVerifier{Column: col}
b.partialMsgStore[topic] = map[string]*verification.PartialColumnVerifier{
string(groupID): verifier,
}
// Run gossip directly (it's called from the event loop).
b.gossip(topic, groupID)
// Give a moment for any async log to flush.
time.Sleep(10 * time.Millisecond)
output := buf.String()
if !strings.Contains(output, "publish") {
t.Errorf("gossip() error log did not appear in the injected logger.\n"+
"Injected logger output: %q\n"+
"This means gossip() is using the package-level `log` instead of `p.logger`.", output)
}
}
// TestGossipUsesInjectedLoggerSuccess verifies that on success, the gossip()
// method does not log an error to the injected logger.
func TestGossipUsesInjectedLoggerSuccess(t *testing.T) {
var buf bytes.Buffer
injectedLogger := &logrus.Logger{
Out: &buf,
Formatter: &logrus.TextFormatter{DisableTimestamp: true},
Level: logrus.DebugLevel,
}
topic := "/eth2/00000000/data_column_sidecar_7/ssz_snappy"
groupID := []byte{0, 1, 2, 3}
b := NewBroadcaster(injectedLogger)
b.publishPartialCol = func(t string, g []byte, c *blocks.PartialDataColumn) error {
return nil // success
}
col := createPartialColumn(t, 2, map[uint64][]byte{0: {0xAA}})
col.Published = true
verifier := &verification.PartialColumnVerifier{Column: col}
b.partialMsgStore[topic] = map[string]*verification.PartialColumnVerifier{
string(groupID): verifier,
}
b.gossip(topic, groupID)
output := buf.String()
require.Equal(t, "", output, "no log output expected on successful gossip")
}

View File

@@ -0,0 +1,26 @@
load("@prysm//tools/go:def.bzl", "go_test")
go_test(
name = "go_default_test",
size = "medium",
srcs = ["two_node_test.go"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_marcopolo_simnet//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,271 @@
package integrationtest
import (
"context"
"crypto/rand"
"fmt"
"testing"
"testing/synctest"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
"github.com/marcopolo/simnet"
"github.com/sirupsen/logrus"
)
// testColumnCallbacks implements partialdatacolumnbroadcaster.ColumnCallbacks for integration tests.
type testColumnCallbacks struct {
t *testing.T
newVerifier func(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error)
completeCh chan blocks.VerifiedRODataColumn
label string
}
func (c *testColumnCallbacks) PartialVerifierFromHeader(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, bool, error) {
if col.SignedBlockHeader == nil || col.SignedBlockHeader.Header == nil {
return nil, true, fmt.Errorf("nil signed block header")
}
if len(col.KzgCommitments) == 0 {
return nil, true, fmt.Errorf("empty kzg commitments")
}
verifier, err := c.newVerifier(col)
if err != nil {
return nil, true, err
}
return verifier, false, nil
}
func (c *testColumnCallbacks) PartialVerifierFromTrustedColumn(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error) {
return c.newVerifier(col)
}
func (c *testColumnCallbacks) ValidateColumn(_ []blocks.CellProofBundle) error {
return nil
}
func (c *testColumnCallbacks) HandleColumn(_ string, col blocks.VerifiedRODataColumn) {
c.t.Logf("%s: Completed! Column has %d cells", c.label, len(col.Column))
c.completeCh <- col
}
func (c *testColumnCallbacks) HandleHeader(_ *ethpb.PartialDataColumnHeader, _ string) {}
// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
// and reconstruct the complete column. Node 1 starts with the even-indexed cells, Node 2 with the odd-indexed cells.
// After exchange, both should have all cells.
func TestTwoNodePartialColumnExchange(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
// Create a simulated libp2p network
latency := time.Millisecond * 10
network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
{LinkSettings: simnet.NodeBiDiLinkSettings{
Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
Uplink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
}, Count: 2},
}, simlibp2p.NetworkSettings{UseBlankHost: true})
require.NoError(t, err)
require.NoError(t, network.Start())
defer func() {
require.NoError(t, network.Close())
}()
defer func() {
for _, node := range meta.Nodes {
err := node.Close()
if err != nil {
panic(err)
}
}
}()
h1 := meta.Nodes[0]
h2 := meta.Nodes[1]
logger := logrus.New()
logger.SetLevel(logrus.DebugLevel)
broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
require.NoError(t, err)
ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
require.NoError(t, err)
defer func() {
broadcaster1.Stop()
broadcaster2.Stop()
}()
// Generate Test Data
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], []byte("test-block-root"))
numCells := 6
commitments := make([][]byte, numCells)
cells := make([][]byte, numCells)
proofs := make([][]byte, numCells)
for i := range numCells {
commitments[i] = make([]byte, 48)
cells[i] = make([]byte, 2048)
_, err := rand.Read(cells[i])
require.NoError(t, err)
proofs[i] = make([]byte, 48)
_ = fmt.Appendf(proofs[i][:0], "proof %d", i)
}
roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{
BodyRoot: blockRoot[:],
KzgCommitments: commitments,
Column: cells,
KzgProofs: proofs,
},
})
headerRoot, err := roDC[0].DataColumnSidecar.SignedBlockHeader.Header.HashTreeRoot()
require.NoError(t, err)
pc1, err := blocks.NewPartialDataColumn(headerRoot, roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
pc2, err := blocks.NewPartialDataColumn(headerRoot, roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
// Split data
for i := range numCells {
if i%2 == 0 {
pc1.ExtendFromVerifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
} else {
pc2.ExtendFromVerifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
}
}
// Setup Topic and Subscriptions
digest := params.ForkDigest(0)
columnIndex := uint64(12)
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
encoder.SszNetworkEncoder{}.ProtocolSuffix()
time.Sleep(100 * time.Millisecond)
topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
newVerifier := func(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error) {
mock := &verification.MockDataColumnsVerifier{}
roCol, err := blocks.NewRODataColumn(col.DataColumnSidecar)
if err != nil {
return nil, err
}
mock.AppendRODataColumns(roCol)
verifier := verification.NewPartialColumnVerifier(mock, col)
return verifier, nil
}
node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
newTestCallbacks := func(completeCh chan blocks.VerifiedRODataColumn, label string) *testColumnCallbacks {
return &testColumnCallbacks{
t: t,
newVerifier: newVerifier,
completeCh: completeCh,
label: label,
}
}
// Connect hosts
err = h1.Connect(context.Background(), peer.AddrInfo{
ID: h2.ID(),
Addrs: h2.Addrs(),
})
require.NoError(t, err)
time.Sleep(300 * time.Millisecond)
// Subscribe to regular GossipSub (critical for partial message RPC exchange!)
sub1, err := topic1.Subscribe()
require.NoError(t, err)
defer sub1.Cancel()
sub2, err := topic2.Subscribe()
require.NoError(t, err)
defer sub2.Cancel()
go broadcaster1.Start(newTestCallbacks(node1Complete, "Node 1"))
go broadcaster2.Start(newTestCallbacks(node2Complete, "Node 2"))
err = broadcaster1.Subscribe(ctx, topic1)
require.NoError(t, err)
err = broadcaster2.Subscribe(ctx, topic2)
require.NoError(t, err)
// Wait for mesh to form
time.Sleep(2 * time.Second)
// Publish
t.Log("Publishing from Node 1")
err = broadcaster1.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
yield(topicStr, pc1)
})
require.NoError(t, err)
time.Sleep(200 * time.Millisecond)
t.Log("Publishing from Node 2")
err = broadcaster2.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
yield(topicStr, pc2)
})
require.NoError(t, err)
// Wait for Completion
timeout := time.After(10 * time.Second)
var col1, col2 blocks.VerifiedRODataColumn
receivedCount := 0
for receivedCount < 2 {
select {
case col1 = <-node1Complete:
t.Log("Node 1 completed reconstruction")
receivedCount++
case col2 = <-node2Complete:
t.Log("Node 2 completed reconstruction")
receivedCount++
case <-timeout:
t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
}
}
// Verify both columns have all cells
assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
})
}

View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package partialdatacolumnbroadcaster
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/p2p/partialdatacolumnbroadcaster")

View File

@@ -0,0 +1,18 @@
package partialdatacolumnbroadcaster
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_useful_cells_total",
Help: "Number of useful cells received via a partial message",
}, []string{"column_index"})
partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_cells_received_total",
Help: "Number of total cells received via a partial message",
}, []string{"column_index"})
)

View File

@@ -0,0 +1,915 @@
package partialdatacolumnbroadcaster
import (
"bytes"
"context"
stderrors "errors"
"iter"
"log/slog"
"strconv"
"strings"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const TTLInSlots = 3
var errInvalidHeader = errors.New("invalid header")
const dataColumnSidecarPrefix = "data_column_sidecar_"
func extractColumnIndexFromTopic(topic string) (uint64, error) {
idx := strings.Index(topic, dataColumnSidecarPrefix)
if idx == -1 {
return 0, errors.New("could not extract column index from topic")
}
sub := topic[idx+len(dataColumnSidecarPrefix):]
end := strings.Index(sub, "/")
if end != -1 {
sub = sub[:end]
}
return strconv.ParseUint(sub, 10, 64)
}
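A test-style sketch of how this parser behaves on a couple of representative topic strings; the inputs are illustrative, and the sketch assumes it lives in the same package so it can call the unexported function directly:

package partialdatacolumnbroadcaster

import "testing"

func TestExtractColumnIndexFromTopicSketch(t *testing.T) {
	cases := []struct {
		topic string
		want  uint64
		ok    bool
	}{
		// Full gossip topic with encoding suffix: the trailing "/" terminates the index.
		{"/eth2/00000000/data_column_sidecar_7/ssz_snappy", 7, true},
		// Topic without a suffix: the whole remainder is parsed as the index.
		{"/eth2/00000000/data_column_sidecar_42", 42, true},
		// No data_column_sidecar_ prefix at all: an error is expected.
		{"/eth2/00000000/beacon_block/ssz_snappy", 0, false},
	}
	for _, c := range cases {
		got, err := extractColumnIndexFromTopic(c.topic)
		if c.ok && (err != nil || got != c.want) {
			t.Errorf("extractColumnIndexFromTopic(%q) = %d, %v; want %d", c.topic, got, err, c.want)
		}
		if !c.ok && err == nil {
			t.Errorf("extractColumnIndexFromTopic(%q) expected an error", c.topic)
		}
	}
}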
// ColumnCallbacks is the interface that the broadcaster uses to validate and handle
// partial data column headers and cells.
type ColumnCallbacks interface {
// PartialVerifierFromHeader builds and validates a partial column from a new header.
// Returns (verifier, reject, err) where:
// - reject=true, err!=nil: REJECT - peer should be penalized
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
// - reject=false, err=nil: valid verifier
PartialVerifierFromHeader(col *blocks.PartialDataColumn) (verifier *verification.PartialColumnVerifier, reject bool, err error)
// PartialVerifierFromTrustedColumn creates a verifier from a previously validated column.
PartialVerifierFromTrustedColumn(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error)
// ValidateColumn validates the KZG proofs of the given cells.
ValidateColumn(cells []blocks.CellProofBundle) error
// HandleColumn is called when a partial column has been fully reconstructed.
HandleColumn(topic string, col blocks.VerifiedRODataColumn)
// HandleHeader is called when a new partial data column header is first validated.
HandleHeader(header *ethpb.PartialDataColumnHeader, groupID string)
}
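To make the REJECT/IGNORE convention concrete, here is a minimal no-op implementation sketch. It assumes this package's existing imports (verification, blocks, ethpb, errors) and uses the mock verifier as a placeholder; it is not the production callback wiring:

// noopCallbacks is an illustrative ColumnCallbacks implementation. It accepts
// everything; only the shape of the reject/ignore signalling is meaningful.
type noopCallbacks struct{}

func (noopCallbacks) PartialVerifierFromHeader(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, bool, error) {
	if col.SignedBlockHeader == nil {
		// reject=true: structurally invalid, penalize the peer.
		return nil, true, errors.New("missing signed block header")
	}
	if len(col.KzgCommitments) == 0 {
		// reject=false with an error: ignore without penalizing.
		return nil, false, errors.New("empty kzg commitments")
	}
	// Placeholder verifier construction; production code wires a real KZG verifier here.
	return verification.NewPartialColumnVerifier(&verification.MockDataColumnsVerifier{}, col), false, nil
}

func (noopCallbacks) PartialVerifierFromTrustedColumn(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error) {
	return verification.NewPartialColumnVerifier(&verification.MockDataColumnsVerifier{}, col), nil
}

func (noopCallbacks) ValidateColumn(_ []blocks.CellProofBundle) error { return nil }

func (noopCallbacks) HandleColumn(_ string, _ blocks.VerifiedRODataColumn) {}

func (noopCallbacks) HandleHeader(_ *ethpb.PartialDataColumnHeader, _ string) {}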
type PartialColumnBroadcaster struct {
logger *logrus.Logger
peerFeedback func(topic string, peer peer.ID, kind pubsub.PeerFeedbackKind) error
publishPartialCol func(topic string, groupID []byte, col *blocks.PartialDataColumn) error
stop chan struct{}
stopOnce sync.Once
callbacks ColumnCallbacks
// map topic -> *pubsub.Topic
topics map[string]*pubsub.Topic
concurrentValidatorSemaphore chan struct{}
concurrentHeaderHandlerSemaphore chan struct{}
// map topic -> map[groupID]PartialColumnVerifier
partialMsgStore map[string]map[string]*verification.PartialColumnVerifier
groupTTL map[string]int8
// validHeaderCache caches validated headers by group ID (works across topics)
validHeaderCache map[string]*ethpb.PartialDataColumnHeader
// map groupID -> map[peer.ID]bool
headerSentCache map[string]map[peer.ID]bool
incomingReq chan request
}
type requestKind uint8
const (
requestKindPublish requestKind = iota
requestKindSubscribe
requestKindUnsubscribe
requestKindGossip
requestKindHandleIncomingRPC
requestKindCellsValidated
)
func (k requestKind) String() string {
switch k {
case requestKindPublish:
return "publish"
case requestKindSubscribe:
return "subscribe"
case requestKindUnsubscribe:
return "unsubscribe"
case requestKindGossip:
return "gossip"
case requestKindHandleIncomingRPC:
return "handle_incoming_rpc"
case requestKindCellsValidated:
return "cells_validated"
default:
return "unknown"
}
}
type requestValues struct {
cellsValidated *cellsValidated
unsub unsubscribe
incomingRPC incomingPartialRPC
sub subscribe
publish publish
gossip gossip
}
type request struct {
requestValues
ctx context.Context
kind requestKind
response chan error
}
func newRequest(ctx context.Context, kind requestKind, v requestValues) request {
return request{
requestValues: v,
ctx: ctx,
kind: kind,
response: make(chan error, 1),
}
}
// finish sends the result to the caller waiting on the response channel.
func (r request) finish(err error) {
r.response <- err
}
// enqueue creates and enqueues a request, blocking until it is accepted.
// Returns an error if the broadcaster has stopped or the context has been cancelled.
// The context must be non-nil; fire-and-forget callers pass context.Background().
func (p *PartialColumnBroadcaster) enqueue(ctx context.Context, kind requestKind, v requestValues) (request, error) {
req := newRequest(ctx, kind, v)
select {
case p.incomingReq <- req:
return req, nil
case <-p.stop:
return req, errPartialBroadcasterStopped
case <-ctx.Done():
return req, ctx.Err()
}
}
// tryEnqueue creates and enqueues a request without blocking.
// Returns false if the request channel is full.
func (p *PartialColumnBroadcaster) tryEnqueue(kind requestKind, v requestValues) (request, bool) {
req := newRequest(context.Background(), kind, v)
select {
case p.incomingReq <- req:
return req, true
default:
return req, false
}
}
// waitForResponse blocks until the request has been processed and returns the result.
// If the request's context is cancelled before a response arrives, it returns the context error.
func (r request) waitForResponse() error {
select {
case err := <-r.response:
return err
case <-r.ctx.Done():
return r.ctx.Err()
}
}
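The enqueue/response pattern above is the general cancellable-event-loop shape: a buffered response channel per request, plus selects on the caller's context and the loop's stop channel. A dependency-free sketch of the same idea (all names here are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
)

type job struct {
	ctx      context.Context
	work     func() error
	response chan error // buffered so the loop never blocks on a caller that gave up
}

type loop struct {
	requests chan job
	stop     chan struct{}
}

// submit mirrors enqueue + waitForResponse: it blocks until the loop accepts
// the job or the caller's context / the loop's stop channel fires.
func (l *loop) submit(ctx context.Context, work func() error) error {
	j := job{ctx: ctx, work: work, response: make(chan error, 1)}
	select {
	case l.requests <- j:
	case <-l.stop:
		return errors.New("stopped")
	case <-ctx.Done():
		return ctx.Err()
	}
	select {
	case err := <-j.response:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (l *loop) run() {
	for {
		select {
		case j := <-l.requests:
			if j.ctx.Err() != nil { // caller already gave up
				j.response <- j.ctx.Err()
				continue
			}
			j.response <- j.work()
		case <-l.stop:
			return
		}
	}
}

func main() {
	l := &loop{requests: make(chan job, 16), stop: make(chan struct{})}
	go l.run()
	err := l.submit(context.Background(), func() error { fmt.Println("handled"); return nil })
	fmt.Println(err)
	close(l.stop)
}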
type publish struct {
topicsAndColumns iter.Seq2[string, blocks.PartialDataColumn]
}
type subscribe struct {
t *pubsub.Topic
}
type unsubscribe struct {
topic string
}
type incomingPartialRPC struct {
*pubsub_pb.PartialMessagesExtension
from peer.ID
message *ethpb.PartialDataColumnSidecar
}
func (r incomingPartialRPC) logFields() logrus.Fields {
return logrus.Fields{
"from": r.from,
"topic": r.GetTopicID(),
"group": r.GroupID,
}
}
type cellsValidated struct {
validationTook time.Duration
topic string
group []byte
cellIndices []uint64
cells []blocks.CellProofBundle
}
func (c *cellsValidated) logFields() logrus.Fields {
return logrus.Fields{
"topic": c.topic,
"group": c.group,
}
}
// gossip is used when we are republishing our PartialDataColumn to gossip peers.
type gossip struct {
topic string
groupID []byte
}
func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
concurrency := params.BeaconConfig().DataColumnSidecarSubnetCount
return &PartialColumnBroadcaster{
topics: make(map[string]*pubsub.Topic),
partialMsgStore: make(map[string]map[string]*verification.PartialColumnVerifier),
groupTTL: make(map[string]int8),
validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
headerSentCache: make(map[string]map[peer.ID]bool),
stop: make(chan struct{}),
// GossipSub sends the messages to this channel. The buffer should be
// big enough to avoid dropping messages. We don't want to block the gossipsub event loop for this.
incomingReq: make(chan request, 128*16),
logger: logger,
concurrentValidatorSemaphore: make(chan struct{}, concurrency),
concurrentHeaderHandlerSemaphore: make(chan struct{}, concurrency),
}
}
// onEmitGossip enqueues a gossip request for the broadcaster's event loop.
func (p *PartialColumnBroadcaster) onEmitGossip(topic string, groupID []byte, _ []peer.ID, _ map[peer.ID]blocks.PartialDataColumnPeerState) {
// Drop gossip emission if we have too many pending requests.
p.tryEnqueue(requestKindGossip, requestValues{
gossip: gossip{
topic: topic,
groupID: groupID,
},
})
}
// onIncomingRPC processes an incoming partial message RPC by updating peer state
// and enqueuing the message for the broadcaster's event loop.
func (p *PartialColumnBroadcaster) onIncomingRPC(from peer.ID, peerStates map[peer.ID]blocks.PartialDataColumnPeerState, rpc *pubsub_pb.PartialMessagesExtension) error {
if rpc == nil {
return nil
}
expectedGroupIDLen := fieldparams.RootLength + 1
if len(rpc.GetGroupID()) != expectedGroupIDLen {
_ = p.peerFeedback(rpc.GetTopicID(), from, pubsub.PeerFeedbackInvalidMessage)
p.logger.WithFields(logrus.Fields{
"peer": from,
"topic": rpc.GetTopicID(),
"got": len(rpc.GetGroupID()),
"expected": expectedGroupIDLen,
}).Debug("Invalid group ID length")
return errors.Errorf("invalid group ID length: got %d, expected %d", len(rpc.GetGroupID()), expectedGroupIDLen)
}
nextPeerState, message, err := updatePeerStateFromIncomingRPC(peerStates[from], rpc)
if err != nil {
return err
}
_, ok := p.tryEnqueue(requestKindHandleIncomingRPC, requestValues{
incomingRPC: incomingPartialRPC{rpc, from, message},
})
if !ok {
p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
return errors.New("incomingReq channel is full, dropping RPC")
}
peerStates[from] = nextPeerState
return nil
}
// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
opts = append(opts,
pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension[blocks.PartialDataColumnPeerState]{
Logger: slogger,
OnEmitGossip: p.onEmitGossip,
OnIncomingRPC: p.onIncomingRPC,
}),
func(ps *pubsub.PubSub) error {
p.peerFeedback = ps.PeerFeedback
p.publishPartialCol = func(topic string, groupID []byte, col *blocks.PartialDataColumn) error {
if _, ok := p.headerSentCache[string(groupID)]; !ok {
p.headerSentCache[string(groupID)] = make(map[peer.ID]bool)
}
return pubsub.PublishPartial(ps, topic, groupID, col.PublishActionsFn(p.headerSentCache[string(groupID)]))
}
return nil
},
)
return opts
}
// Start starts the event loop of the PartialColumnBroadcaster.
// It registers the provided callbacks used to validate and handle partial data columns.
// Note: The event loop is blocking, so the broadcaster should be started in a goroutine.
func (p *PartialColumnBroadcaster) Start(callbacks ColumnCallbacks) {
p.callbacks = callbacks
p.loop()
}
var (
errPartialBroadcasterStopped = errors.New("partial column broadcaster stopped")
errUnknownRequestKind = errors.New("unknown request kind")
)
func (p *PartialColumnBroadcaster) loop() {
cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
defer cleanup.Stop()
for {
select {
case req := <-p.incomingReq:
// This check enables the requester to cancel the request by cancelling the given context.
if req.ctx.Err() != nil {
p.logger.WithError(req.ctx.Err()).WithField("kind", req.kind.String()).
Debug("Context canceled for PartialColumnBroadcaster event.") // Debug log level to avoid log storm at node shutdown.
req.finish(req.ctx.Err())
continue
}
var err error
switch req.kind {
case requestKindPublish:
err = p.publish(req.publish.topicsAndColumns)
case requestKindSubscribe:
err = p.subscribe(req.sub.t)
case requestKindUnsubscribe:
err = p.unsubscribe(req.unsub.topic)
case requestKindGossip:
p.gossip(req.gossip.topic, req.gossip.groupID)
case requestKindHandleIncomingRPC:
err = p.handleIncomingRPC(req.incomingRPC)
case requestKindCellsValidated:
err = p.handleCellsValidated(req.cellsValidated)
default:
err = errUnknownRequestKind
}
if err != nil {
p.logger.WithField("kind", req.kind.String()).WithError(err).
Error("Failure handling PartialColumnBroadcaster event.")
err = errors.Wrap(err, "partial column broadcaster "+req.kind.String()+" event")
}
req.finish(err)
case <-p.stop:
// Drain remaining requests before exiting the loop.
for {
select {
case req := <-p.incomingReq:
req.finish(errPartialBroadcasterStopped)
default:
return
}
}
case <-cleanup.C:
for groupID, ttl := range p.groupTTL {
if ttl > 0 {
p.groupTTL[groupID] = ttl - 1
continue
}
delete(p.groupTTL, groupID)
delete(p.validHeaderCache, groupID)
delete(p.headerSentCache, groupID)
for topic, msgStore := range p.partialMsgStore {
delete(msgStore, groupID)
if len(msgStore) == 0 {
delete(p.partialMsgStore, topic)
}
}
}
}
}
}
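The per-slot cleanup at the end of the loop is a simple TTL countdown per group; a minimal standalone sketch of that eviction policy, with the slot count and map contents illustrative:

package main

import "fmt"

const ttlInSlots = 3 // illustrative, mirroring TTLInSlots

// tick decrements every group's TTL and evicts groups that reached zero,
// along with any per-group state held in other maps.
func tick(groupTTL map[string]int8, perGroupState map[string]string) {
	for groupID, ttl := range groupTTL {
		if ttl > 0 {
			groupTTL[groupID] = ttl - 1
			continue
		}
		delete(groupTTL, groupID)
		delete(perGroupState, groupID)
	}
}

func main() {
	ttl := map[string]int8{"group-a": ttlInSlots}
	state := map[string]string{"group-a": "cached header"}
	for slot := 0; slot < 5; slot++ {
		tick(ttl, state)
		fmt.Printf("after slot %d: ttl=%v state=%v\n", slot, ttl, state)
	}
}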
func (p *PartialColumnBroadcaster) getPartialVerifier(topic string, group []byte) *verification.PartialColumnVerifier {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
return nil
}
verifier, ok := topicStore[string(group)]
if !ok {
return nil
}
return verifier
}
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
verifier := p.getPartialVerifier(topic, group)
if verifier == nil {
return nil
}
return verifier.Column
}
func decodePartsMetadataFromPeerState(state *ethpb.PartialDataColumnPartsMetadata, expectedLength uint64) (*ethpb.PartialDataColumnPartsMetadata, error) {
if state == nil {
return blocks.NewPartsMetaWithNoAvailableAndNoRequests(expectedLength), nil
}
return state, nil
}
func updatePeerStateFromIncomingRPC(peerState blocks.PartialDataColumnPeerState, rpc *pubsub_pb.PartialMessagesExtension) (blocks.PartialDataColumnPeerState,
*ethpb.PartialDataColumnSidecar, error) {
peerState = blocks.ClonePeerState(peerState)
hasIncomingPartsMetadata := len(rpc.PartsMetadata) > 0
hasMessage := len(rpc.PartialMessage) > 0
if hasIncomingPartsMetadata {
var incomingMeta ethpb.PartialDataColumnPartsMetadata
if err := incomingMeta.UnmarshalSSZ(rpc.PartsMetadata); err != nil {
return peerState, nil, errors.Wrap(err, "failed to unmarshal incoming parts metadata")
}
if incomingMeta.Available.Len() == 0 {
return peerState, nil, errors.New("incoming parts metadata has 0 length availability")
}
if peerState.Recvd == nil {
peerState.Recvd = &incomingMeta
} else {
if peerState.Recvd.Requests.Len() != incomingMeta.Requests.Len() {
return peerState, nil, errors.New("failed to merge available cells into recvdState parts metadata. requests length mismatch")
}
peerState.Recvd.Requests = incomingMeta.Requests
var err error
peerState.Recvd.Available, err = peerState.Recvd.Available.Or(incomingMeta.Available)
if err != nil {
return peerState, nil, errors.Wrap(err, "failed to merge available cells into recvdState parts metadata")
}
}
}
// we've already handled the update to the peer state based on the incoming parts metadata,
// so we can return early if there's no message to process.
if !hasMessage {
return peerState, nil, nil
}
var message ethpb.PartialDataColumnSidecar
if err := message.UnmarshalSSZ(rpc.PartialMessage); err != nil {
return peerState, nil, errors.Wrap(err, "failed to unmarshal partial message data")
}
if len(message.CellsPresentBitmap) == 0 {
return peerState, &message, nil
}
nKzgCommitments := message.CellsPresentBitmap.Len()
if nKzgCommitments == 0 {
return peerState, nil, errors.New("length of cells present bitmap is 0")
}
// only update RecvdState using the incoming partial message if the peer did not send us their parts metadata
if !hasIncomingPartsMetadata {
receivedMeta, err := decodePartsMetadataFromPeerState(peerState.Recvd, nKzgCommitments)
if err != nil {
return peerState, nil, errors.Wrap(err, "received")
}
recvdState, err := blocks.MergeAvailableIntoPartsMetadata(receivedMeta, message.CellsPresentBitmap)
if err != nil {
return peerState, nil, err
}
peerState.Recvd = recvdState
}
sentMeta, err := decodePartsMetadataFromPeerState(peerState.Sent, nKzgCommitments)
if err != nil {
return peerState, nil, errors.Wrap(err, "sent")
}
sentState, err := blocks.MergeAvailableIntoPartsMetadata(sentMeta, message.CellsPresentBitmap)
if err != nil {
return peerState, nil, err
}
peerState.Sent = sentState
return peerState, &message, nil
}
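The core of this state update is an OR-merge of availability bitmaps; a dependency-free sketch of that merge over plain byte bitmaps (the real code works on SSZ bitlists with the length checks shown above):

package main

import (
	"errors"
	"fmt"
)

// orMerge ORs an incoming availability bitmap into the one already tracked
// for a peer, failing on a length mismatch.
func orMerge(have, incoming []byte) ([]byte, error) {
	if have == nil {
		out := make([]byte, len(incoming))
		copy(out, incoming)
		return out, nil
	}
	if len(have) != len(incoming) {
		return nil, errors.New("availability bitmap length mismatch")
	}
	out := make([]byte, len(have))
	for i := range have {
		out[i] = have[i] | incoming[i]
	}
	return out, nil
}

func main() {
	have := []byte{0b0000_0101}     // peer previously advertised cells 0 and 2
	incoming := []byte{0b0000_1010} // this RPC advertises cells 1 and 3
	merged, err := orMerge(have, incoming)
	fmt.Printf("%08b %v\n", merged, err) // [00001111] <nil>
}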
func (p *PartialColumnBroadcaster) handleIncomingRPC(rpc incomingPartialRPC) error {
if p.peerFeedback == nil || p.publishPartialCol == nil {
return errors.New("pubsub not initialized")
}
message := rpc.message
hasMessage := message != nil
topicID := rpc.GetTopicID()
groupID := rpc.GroupID
ourVerifier := p.getPartialVerifier(topicID, groupID)
var shouldRepublish bool
if ourVerifier == nil && hasMessage {
header, headerWasCached := p.getHeader(groupID, message)
if header == nil {
return nil
}
// downscore peer if invalid header
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
p.logger.WithFields(rpc.logFields()).Debug("Header is missing signed block header or header")
_ = p.peerFeedback(topicID, rpc.from, pubsub.PeerFeedbackInvalidMessage)
return errors.New("header is missing signed block header or header")
}
// downscore peer if invalid header
root, err := header.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
p.logger.WithFields(rpc.logFields()).WithError(err).Debug("Failed to get root from header")
_ = p.peerFeedback(topicID, rpc.from, pubsub.PeerFeedbackInvalidMessage)
return errors.Wrap(err, "failed to get root from header")
}
columnIndex, err := extractColumnIndexFromTopic(topicID)
if err != nil {
return err
}
verifier, err := p.makeVerifierFromHeader(root, header, columnIndex, headerWasCached, rpc)
if err != nil {
if err == errInvalidHeader {
return nil
}
return err
}
if !headerWasCached {
p.logger.WithFields(rpc.logFields()).Debug("Handling header as it was previously not cached for this group")
p.handleHeader(rpc, header)
}
// Save to store
topicStore, ok := p.partialMsgStore[topicID]
if !ok {
topicStore = make(map[string]*verification.PartialColumnVerifier)
p.partialMsgStore[topicID] = topicStore
}
topicStore[string(groupID)] = verifier
p.groupTTL[string(groupID)] = TTLInSlots
ourVerifier = verifier
shouldRepublish = true
}
if ourVerifier == nil {
// We don't have a partial column for this. Can happen if we got cells
// without a header.
return nil
}
ourDataColumn := ourVerifier.Column
if hasMessage {
err := p.handlePartialCells(ourDataColumn, message, rpc)
if err != nil {
return err
}
}
return p.republishColumn(ourDataColumn, rpc, shouldRepublish)
}
func (p *PartialColumnBroadcaster) makeVerifierFromHeader(root [fieldparams.RootLength]byte, header *ethpb.PartialDataColumnHeader, columnIndex uint64,
headerWasCached bool, rpc incomingPartialRPC) (*verification.PartialColumnVerifier, error) {
topicID := rpc.GetTopicID()
newColumn, err := blocks.NewPartialDataColumn(
root,
header.SignedBlockHeader,
columnIndex,
header.KzgCommitments,
header.KzgCommitmentsInclusionProof,
)
if err != nil {
p.logger.WithError(err).WithFields(logrus.Fields{
"topic": topicID,
"columnIndex": columnIndex,
"numCommitments": len(header.KzgCommitments),
}).Error("Failed to create partial data column from header")
return nil, err
}
if !bytes.Equal(newColumn.GroupID(), rpc.GroupID) {
p.logger.WithFields(rpc.logFields()).Error("Group ID mismatch")
// REJECT case: penalize the peer
_ = p.peerFeedback(topicID, rpc.from, pubsub.PeerFeedbackInvalidMessage)
return nil, errors.New("group ID mismatch")
}
if headerWasCached {
verifier, err := p.callbacks.PartialVerifierFromTrustedColumn(&newColumn)
if err != nil {
p.logger.WithError(err).WithFields(logrus.Fields{
"topic": topicID,
"columnIndex": columnIndex,
"numCommitments": len(header.KzgCommitments),
}).Error("Failed to create partial column verifier from header")
return nil, err
}
return verifier, nil
}
var reject bool
verifier, reject, err := p.callbacks.PartialVerifierFromHeader(&newColumn)
if err != nil {
p.logger.WithError(err).WithField("reject", reject).Debug("Header validation failed")
if reject {
// REJECT case: penalize the peer
_ = p.peerFeedback(topicID, rpc.from, pubsub.PeerFeedbackInvalidMessage)
}
// Both REJECT and IGNORE: don't process further
return nil, errInvalidHeader
}
return verifier, nil
}
func (p *PartialColumnBroadcaster) getHeader(groupID []byte, message *ethpb.PartialDataColumnSidecar) (*ethpb.PartialDataColumnHeader, bool) {
if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
return cachedHeader, true
} else {
// We haven't seen this group before. Check whether the incoming message carries a header we can validate.
if len(message.Header) == 0 {
p.logger.Debug("No partial column found and no header in message, ignoring")
return nil, false
}
return message.Header[0], false
}
}
func (p *PartialColumnBroadcaster) republishColumn(ourDataColumn *blocks.PartialDataColumn, rpc incomingPartialRPC,
shouldRepublish bool) error {
if !ourDataColumn.Published {
p.logger.WithFields(rpc.logFields()).Debug("Column not published, skipping republish")
return nil
}
topicId := rpc.GetTopicID()
peerMeta := rpc.PartsMetadata
myMeta, err := ourDataColumn.PartsMetadata()
if err != nil {
return err
}
if !shouldRepublish && len(peerMeta) > 0 && !bytes.Equal(peerMeta, myMeta) {
// Either we have something they don't or vice versa
shouldRepublish = true
p.logger.WithFields(rpc.logFields()).Debug("Republishing due to parts metadata difference")
}
if shouldRepublish {
err := p.publishPartialCol(topicId, ourDataColumn.GroupID(), ourDataColumn)
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) handlePartialCells(ourDataColumn *blocks.PartialDataColumn, message *ethpb.PartialDataColumnSidecar,
rpc incomingPartialRPC) error {
topicId := rpc.GetTopicID()
cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(message)
if err != nil {
return err
}
// Track cells received via partial message
if len(cellIndices) > 0 {
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
}
if len(cellsToVerify) > 0 {
p.concurrentValidatorSemaphore <- struct{}{}
go func() {
defer func() {
<-p.concurrentValidatorSemaphore
}()
start := time.Now()
err := p.callbacks.ValidateColumn(cellsToVerify)
if err != nil {
p.logger.WithError(err).WithFields(rpc.logFields()).Error("Failed to validate cells")
_ = p.peerFeedback(topicId, rpc.from, pubsub.PeerFeedbackInvalidMessage)
return
}
_ = p.peerFeedback(topicId, rpc.from, pubsub.PeerFeedbackUsefulMessage)
_, _ = p.enqueue(context.Background(), requestKindCellsValidated, requestValues{
cellsValidated: &cellsValidated{
validationTook: time.Since(start),
topic: topicId,
group: ourDataColumn.GroupID(),
cells: cellsToVerify,
cellIndices: cellIndices,
},
})
}()
}
return nil
}
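The validation fan-out above bounds concurrency with a buffered-channel semaphore; a standalone sketch of that pattern, with the limit and the simulated work purely illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxConcurrent = 4 // illustrative, mirroring the subnet-count-sized semaphore
	sem := make(chan struct{}, maxConcurrent)
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		sem <- struct{}{} // blocks once maxConcurrent validations are in flight
		wg.Add(1)
		go func(i int) {
			defer func() {
				<-sem // release the slot when validation finishes
				wg.Done()
			}()
			time.Sleep(10 * time.Millisecond) // stand-in for ValidateColumn
			fmt.Println("validated batch", i)
		}(i)
	}
	wg.Wait()
}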
func (p *PartialColumnBroadcaster) handleHeader(rpc incomingPartialRPC, header *ethpb.PartialDataColumnHeader) {
groupID := rpc.GroupID
p.logger.Debug("Handling header as it was previously not cached for this group")
// Cache the valid header.
p.validHeaderCache[string(groupID)] = header
select {
case p.concurrentHeaderHandlerSemaphore <- struct{}{}:
go func() {
p.callbacks.HandleHeader(header, string(groupID))
<-p.concurrentHeaderHandlerSemaphore
}()
default:
p.logger.WithFields(rpc.logFields()).Warn("Dropping header handler, max concurrent header handlers reached")
}
}
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
ourVerifier := p.getPartialVerifier(cells.topic, cells.group)
if ourVerifier == nil {
return errors.New("data column not found for verified cells")
}
ourDataColumn := ourVerifier.Column
var extended bool
for i, bundle := range cells.cells {
if bundle.ColumnIndex != ourDataColumn.Index {
return errors.New("cell bundle has wrong column index")
}
if ourVerifier.ExtendFromVerifiedCell(cells.cellIndices[i], bundle.Cell, bundle.Proof) {
extended = true
}
}
p.logger.WithFields(logrus.Fields{"duration": cells.validationTook, "extended": extended}).Debug("Extended partial message")
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
if extended {
// Track useful cells (cells that extended our data)
partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))
col, ok, err := ourVerifier.Complete()
if err != nil {
p.logger.WithError(err).WithFields(cells.logFields()).Error("Failed to complete partial column verifier")
return err
}
if ok {
p.logger.WithFields(cells.logFields()).Info("Completed partial column")
go p.callbacks.HandleColumn(cells.topic, col)
} else {
p.logger.WithFields(cells.logFields()).Info("Extended partial column")
}
if !ourDataColumn.Published {
p.logger.WithFields(cells.logFields()).Debug("Column not published, skipping republish")
return nil
}
err = p.publishPartialCol(cells.topic, ourDataColumn.GroupID(), ourDataColumn)
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) Stop() {
p.stopOnce.Do(func() {
close(p.stop)
})
}
// Publish publishes partial columns for the given topics.
func (p *PartialColumnBroadcaster) Publish(ctx context.Context, topicsAndColumns iter.Seq2[string, blocks.PartialDataColumn]) error {
if p.peerFeedback == nil || p.publishPartialCol == nil {
return errors.New("pubsub not initialized")
}
req, err := p.enqueue(ctx, requestKindPublish, requestValues{
publish: publish{
topicsAndColumns: topicsAndColumns,
},
})
if err != nil {
return err
}
return req.waitForResponse()
}
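Publish takes an iter.Seq2 over (topic, column) pairs; a usage-shaped sketch of adapting a slice of columns into that iterator, assuming this package's imports. The helper name and the single shared topic are placeholders; real callers derive the topic from the fork digest and subnet:

// publishAll is an illustrative helper, not part of the broadcaster's API.
func publishAll(ctx context.Context, b *PartialColumnBroadcaster, topic string, cols []blocks.PartialDataColumn) error {
	return b.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
		for _, col := range cols {
			if !yield(topic, col) {
				return
			}
		}
	})
}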
func (p *PartialColumnBroadcaster) gossip(topic string, groupID []byte) {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
return
}
existing := topicStore[string(groupID)]
if existing == nil {
return
}
if existing.Column.Included.Count() == 0 {
// Nothing useful here
return
}
if !existing.Column.Published {
return
}
err := p.publishPartialCol(topic, existing.Column.GroupID(), existing.Column)
if err != nil {
p.logger.WithFields(logrus.Fields{"err": err}).Warn("Failed to publish gossip")
}
}
func (p *PartialColumnBroadcaster) publish(topicsAndColumns iter.Seq2[string, blocks.PartialDataColumn]) error {
var aggErr error
for topic, partialCol := range topicsAndColumns {
if len(partialCol.KzgCommitments) == 0 {
p.logger.WithFields(logrus.Fields{
"topic": topic,
}).Debug("Skipping publish for column with no KZG commitments")
continue
}
groupIDBytes := partialCol.GroupID()
topicStore, ok := p.partialMsgStore[topic]
if !ok {
topicStore = make(map[string]*verification.PartialColumnVerifier)
p.partialMsgStore[topic] = topicStore
}
verifier := p.getPartialVerifier(topic, groupIDBytes)
if verifier == nil {
var err error
verifier, err = p.callbacks.PartialVerifierFromTrustedColumn(&partialCol)
if err != nil {
aggErr = stderrors.Join(aggErr, err)
continue
}
topicStore[string(groupIDBytes)] = verifier
} else {
for i := range partialCol.Included.Len() {
if partialCol.Included.BitAt(i) {
verifier.ExtendFromVerifiedCell(uint64(i), partialCol.Column[i], partialCol.KzgProofs[i])
}
}
}
ourColumn := verifier.Column
p.groupTTL[string(groupIDBytes)] = TTLInSlots
err := p.publishPartialCol(topic, ourColumn.GroupID(), ourColumn)
if err == nil {
ourColumn.Published = true
} else {
aggErr = stderrors.Join(aggErr, err)
}
}
return aggErr
}
func (p *PartialColumnBroadcaster) Subscribe(ctx context.Context, t *pubsub.Topic) error {
req, err := p.enqueue(ctx, requestKindSubscribe, requestValues{
sub: subscribe{
t: t,
},
})
if err != nil {
return err
}
return req.waitForResponse()
}
func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic) error {
topic := t.String()
if _, ok := p.topics[topic]; ok {
return errors.New("already subscribed")
}
p.topics[topic] = t
return nil
}
func (p *PartialColumnBroadcaster) Unsubscribe(ctx context.Context, topic string) error {
req, err := p.enqueue(ctx, requestKindUnsubscribe, requestValues{
unsub: unsubscribe{
topic: topic,
},
})
if err != nil {
return err
}
return req.waitForResponse()
}
func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
t, ok := p.topics[topic]
if !ok {
return errors.New("topic not found")
}
delete(p.topics, topic)
delete(p.partialMsgStore, topic)
return t.Close()
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,31 @@
package partialdatacolumnbroadcaster
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/testing/require"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// TestPublishReturnsOnContextDeadline verifies that Publish returns a
// context error when the event loop is not processing requests, rather
// than blocking indefinitely.
func TestPublishReturnsOnContextDeadline(t *testing.T) {
b := NewBroadcaster(logrus.New())
b.peerFeedback = func(_ string, _ peer.ID, _ pubsub.PeerFeedbackKind) error { return nil }
b.publishPartialCol = func(_ string, _ []byte, _ *blocks.PartialDataColumn) error { return nil }
// Deliberately do NOT start the event loop. This simulates loop() being
// busy with another request; the effect is the same: the request's response channel never receives a response.
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
err := b.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {})
require.ErrorIs(t, err, context.DeadlineExceeded)
}


@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address, resAddress, "Unexpected address")
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
resDirection, err := p.Direction(id)
require.NoError(t, err)
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress2, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address2, resAddress2, "Unexpected address")
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
resDirection2, err := p.Direction(id)
require.NoError(t, err)


@@ -170,7 +170,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
pubsub.WithRawTracer(&gossipTracer{host: s.host, allowedTopics: filt}),
}
if len(s.cfg.StaticPeers) > 0 {
@@ -181,6 +181,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}
if s.partialColumnBroadcaster != nil {
psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
}
return psOpts
}


@@ -1,6 +1,8 @@
package p2p
import (
"sync"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -8,7 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var _ = pubsub.RawTracer(gossipTracer{})
var _ = pubsub.RawTracer(&gossipTracer{})
// Initializes the values for the pubsub rpc action.
type action int
@@ -23,85 +25,160 @@ const (
// and broadcasted through gossipsub.
type gossipTracer struct {
host host.Host
allowedTopics pubsub.SubscriptionFilter
mu sync.Mutex
// map topic -> Set(peerID). Peer is in set if it supports partial messages.
partialMessagePeers map[string]map[peer.ID]struct{}
// map topic -> Set(peerID). Peer is in set if in the mesh.
meshPeers map[string]map[peer.ID]struct{}
}
// AddPeer .
func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
func (g *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
// no-op
}
// RemovePeer .
func (g gossipTracer) RemovePeer(p peer.ID) {
// no-op
func (g *gossipTracer) RemovePeer(p peer.ID) {
g.mu.Lock()
defer g.mu.Unlock()
for _, peers := range g.partialMessagePeers {
delete(peers, p)
}
for topic, peers := range g.meshPeers {
if _, ok := peers[p]; ok {
delete(peers, p)
g.updateMeshPeersMetric(topic)
}
}
}
// Join .
func (g gossipTracer) Join(topic string) {
func (g *gossipTracer) Join(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(1)
g.mu.Lock()
defer g.mu.Unlock()
if g.partialMessagePeers == nil {
g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
}
if g.partialMessagePeers[topic] == nil {
g.partialMessagePeers[topic] = make(map[peer.ID]struct{})
}
if g.meshPeers == nil {
g.meshPeers = make(map[string]map[peer.ID]struct{})
}
if g.meshPeers[topic] == nil {
g.meshPeers[topic] = make(map[peer.ID]struct{})
}
}
// Leave .
func (g gossipTracer) Leave(topic string) {
func (g *gossipTracer) Leave(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(0)
g.mu.Lock()
defer g.mu.Unlock()
delete(g.partialMessagePeers, topic)
delete(g.meshPeers, topic)
}
// Graft .
func (g gossipTracer) Graft(p peer.ID, topic string) {
func (g *gossipTracer) Graft(p peer.ID, topic string) {
pubsubTopicsGraft.WithLabelValues(topic).Inc()
g.mu.Lock()
defer g.mu.Unlock()
if m, ok := g.meshPeers[topic]; ok {
m[p] = struct{}{}
}
g.updateMeshPeersMetric(topic)
}
// Prune .
func (g gossipTracer) Prune(p peer.ID, topic string) {
func (g *gossipTracer) Prune(p peer.ID, topic string) {
pubsubTopicsPrune.WithLabelValues(topic).Inc()
g.mu.Lock()
defer g.mu.Unlock()
if m, ok := g.meshPeers[topic]; ok {
delete(m, p)
}
g.updateMeshPeersMetric(topic)
}
// ValidateMessage .
func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
func (g *gossipTracer) ValidateMessage(msg *pubsub.Message) {
pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
}
// DeliverMessage .
func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
func (g *gossipTracer) DeliverMessage(msg *pubsub.Message) {
pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
}
// RejectMessage .
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
func (g *gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
}
// DuplicateMessage .
func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
func (g *gossipTracer) DuplicateMessage(msg *pubsub.Message) {
pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
}
// UndeliverableMessage .
func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
func (g *gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
}
// ThrottlePeer .
func (g gossipTracer) ThrottlePeer(p peer.ID) {
func (g *gossipTracer) ThrottlePeer(p peer.ID) {
agent := agentFromPid(p, g.host.Peerstore())
pubsubPeerThrottle.WithLabelValues(agent).Inc()
}
// RecvRPC .
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
func (g *gossipTracer) RecvRPC(rpc *pubsub.RPC) {
from := rpc.From()
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
g.mu.Lock()
defer g.mu.Unlock()
for _, sub := range rpc.Subscriptions {
topic := sub.GetTopicid()
if !g.allowedTopics.CanSubscribe(topic) {
continue
}
if g.partialMessagePeers == nil {
g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
}
m, ok := g.partialMessagePeers[topic]
if !ok {
m = make(map[peer.ID]struct{})
g.partialMessagePeers[topic] = m
}
if sub.GetSubscribe() && sub.GetRequestsPartial() {
m[from] = struct{}{}
} else {
delete(m, from)
if len(m) == 0 {
delete(g.partialMessagePeers, topic)
}
}
}
}
// SendRPC .
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
func (g *gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
}
// DropRPC .
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
func (g *gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
}
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
subCtr.Add(float64(len(rpc.Subscriptions)))
if rpc.Control != nil {
ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
@@ -110,12 +187,41 @@ func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pu
ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
}
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
return
}
for _, msg := range rpc.Publish {
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
continue
}
pubCtr.WithLabelValues(*msg.Topic).Inc()
pubCtr.WithLabelValues(msg.GetTopic()).Inc()
pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
}
if rpc.Partial != nil {
pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
}
}
// updateMeshPeersMetric requires the caller to hold the state mutex
func (g *gossipTracer) updateMeshPeersMetric(topic string) {
meshPeers, ok := g.meshPeers[topic]
if !ok {
return
}
partialPeers, ok := g.partialMessagePeers[topic]
if !ok {
return
}
var supportsPartial, doesNotSupportPartial float64
for p := range meshPeers {
if _, ok := partialPeers[p]; ok {
supportsPartial++
} else {
doesNotSupportPartial++
}
}
pubsubMeshPeers.WithLabelValues(topic, "true").Set(supportsPartial)
pubsubMeshPeers.WithLabelValues(topic, "false").Set(doesNotSupportPartial)
}
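updateMeshPeersMetric assumes a pubsubMeshPeers gauge vector keyed by topic and by whether the peer supports partial messages. Its definition lives in the metrics file rather than this hunk; a sketch of the assumed shape (the metric name and help text here are illustrative):

var pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
    Name: "p2p_pubsub_mesh_peers", // illustrative; the real name is defined alongside the other pubsub metrics
    Help: "Number of mesh peers per topic, split by partial-message support.",
}, []string{"topic", "supports_partial"})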


@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
@@ -77,6 +78,7 @@ type Service struct {
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
@@ -147,6 +149,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
custodyInfoSet: make(chan struct{}),
}
if cfg.PartialDataColumns {
s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
}
ipAddr := prysmnetwork.IPAddr()
opts, err := s.buildOptions(ipAddr, s.privKey)
@@ -314,6 +320,7 @@ func (s *Service) Stop() error {
if s.dv5Listener != nil {
s.dv5Listener.Close()
}
return nil
}
@@ -350,6 +357,10 @@ func (s *Service) PubSub() *pubsub.PubSub {
return s.pubsub
}
func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return s.partialColumnBroadcaster
}
// Host returns the currently running libp2p
// host of the service.
func (s *Service) Host() host.Host {


@@ -21,6 +21,7 @@ go_library(
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/fieldparams:go_default_library",


@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -108,6 +109,10 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
return nil
}
func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// MetadataSeq -- fake.
func (*FakeP2P) MetadataSeq() uint64 {
return 0
@@ -169,7 +174,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
return nil
}


@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
m.BroadcastCalled.Store(true)
return nil
}


@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -248,7 +249,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
p.BroadcastCalled.Store(true)
return nil
}
@@ -314,6 +315,10 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
return p.pubsub
}
func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// Disconnect from a peer.
func (p *TestP2P) Disconnect(pid peer.ID) error {
return p.BHost.Network().ClosePeer(pid)


@@ -7,6 +7,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
builderapi "github.com/OffchainLabs/prysm/v7/api/client/builder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
@@ -338,6 +339,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}
rob, err := blocks.NewROBlockWithRoot(block, root)
var partialColumns []blocks.PartialDataColumn
if block.IsBlinded() {
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
if errors.Is(err, builderapi.ErrBadGateway) {
@@ -345,7 +347,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
} else if block.Version() >= version.Deneb && block.Version() < version.Gloas {
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
blobSidecars, dataColumnSidecars, partialColumns, err = vs.handleUnblindedBlock(rob, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -366,7 +368,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
wg.Wait()
if block.Version() < version.Gloas {
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars, partialColumns); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
}
@@ -384,9 +386,10 @@ func (vs *Server) broadcastAndReceiveSidecars(
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSidecars []blocks.RODataColumn,
partialColumns []blocks.PartialDataColumn,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, partialColumns); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
@@ -435,34 +438,50 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
func (vs *Server) handleUnblindedBlock(
block blocks.ROBlock,
req *ethpb.GenericSignedBeaconBlock,
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, []blocks.PartialDataColumn, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if block.Version() >= version.Fulu {
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
}
// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidcars")
return nil, nil, nil, errors.Wrap(err, "data column sidecars")
}
return nil, roDataColumnSidecars, nil
if len(cellsPerBlob) == 0 {
return nil, roDataColumnSidecars, nil, nil
}
included := bitfield.NewBitlist(uint64(len(cellsPerBlob)))
included = included.Not() // all bits set to 1
var partialColumnOpts []blocks.PartialDataColumnOption
if vs.BlockProposalEagerPushCells {
log.Debug("Block proposer eager push cells enabled, including cells in eager push")
partialColumnOpts = append(partialColumnOpts, blocks.WithByBlockProposer())
}
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block), partialColumnOpts...)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "data column sidecars")
}
return nil, roDataColumnSidecars, partialColumns, nil
}
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "build blob sidecars")
return nil, nil, nil, errors.Wrap(err, "build blob sidecars")
}
return blobSidecars, nil, nil
return blobSidecars, nil, nil, nil
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
@@ -529,7 +548,7 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn, partialColumns []blocks.PartialDataColumn) error {
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, sidecar := range roSidecars {
@@ -538,7 +557,7 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars
}
// Broadcast sidecars (non blocking).
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars, partialColumns); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
}


@@ -77,6 +77,7 @@ type Server struct {
ExecutionPayloadEnvelopeReceiver blockchain.ExecutionPayloadEnvelopeReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
BlockProposalEagerPushCells bool
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher


@@ -98,6 +98,7 @@ type Config struct {
ExecutionChainInfoFetcher execution.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
BlockProposalEagerPushCells bool
MockEth1Votes bool
EnableDebugRPCEndpoints bool
AttestationCache *cache.AttestationCache
@@ -266,6 +267,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
PayloadIDCache: s.cfg.PayloadIDCache,
AttestationStateFetcher: s.cfg.AttestationReceiver,
GraffitiInfo: s.cfg.GraffitiInfo,
BlockProposalEagerPushCells: s.cfg.BlockProposalEagerPushCells,
}
s.validatorServer = validatorServer
nodeServer := &nodev1alpha1.Server{


@@ -64,6 +64,7 @@ go_library(
"validate_data_column.go",
"validate_execution_payload_envelope.go",
"validate_light_client.go",
"validate_partial_header.go",
"validate_payload_attestation.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
@@ -107,6 +108,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
@@ -228,6 +230,7 @@ go_test(
"validate_data_column_test.go",
"validate_execution_payload_envelope_test.go",
"validate_light_client_test.go",
"validate_partial_header_test.go",
"validate_payload_attestation_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",


@@ -2,8 +2,12 @@ package sync
import (
"context"
"iter"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -19,9 +23,16 @@ type signatureVerifier struct {
resChan chan error
}
type errorWithSegment struct {
err error
// segment is only available if the batched verification failed
segment *peerdas.CellProofBundleSegment
}
type kzgVerifier struct {
dataColumns []blocks.RODataColumn
resChan chan error
sizeHint int
cellProofs iter.Seq[blocks.CellProofBundle]
resChan chan errorWithSegment
}
// A routine that runs in the background to perform batch
@@ -53,6 +64,33 @@ func (s *Service) verifierRoutine() {
}
}
// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
kzgBatch := make([]*kzgVerifier, 0, 1)
for {
kzgBatch = kzgBatch[:0]
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
}
for {
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
continue
default:
verifyKzgBatch(kzgBatch)
}
break
}
}
}
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
@@ -124,3 +162,90 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
return aggSet, nil
}
func (s *Service) validateKZGProofs(ctx context.Context, sizeHint int, cellProofs iter.Seq[blocks.CellProofBundle]) error {
_, span := trace.StartSpan(ctx, "sync.validateKZGProofs")
defer span.End()
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
resChan := make(chan errorWithSegment, 1)
verificationSet := &kzgVerifier{sizeHint: sizeHint, cellProofs: cellProofs, resChan: resChan}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return ctx.Err()
}
select {
case <-ctx.Done():
return ctx.Err() // parent context canceled, give up
case errWithSegment := <-resChan:
if errWithSegment.err != nil {
err := errWithSegment.err
log.WithError(err).Trace("Could not perform batch verification of cells")
tracing.AnnotateError(span, err)
if errWithSegment.segment == nil {
return err
}
// We failed batch verification. Try again in this goroutine without batching
return validateUnbatchedKZGProofs(ctx, *errWithSegment.segment)
}
}
return nil
}
func validateUnbatchedKZGProofs(ctx context.Context, segment peerdas.CellProofBundleSegment) error {
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
defer span.End()
start := time.Now()
if err := segment.Verify(); err != nil {
err = errors.Wrap(err, "could not verify")
tracing.AnnotateError(span, err)
return err
}
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
return nil
}
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
if len(kzgBatch) == 0 {
return
}
cellProofIters := make([]iter.Seq[blocks.CellProofBundle], 0, len(kzgBatch))
var sizeHint int
for _, kzgVerifier := range kzgBatch {
sizeHint += kzgVerifier.sizeHint
cellProofIters = append(cellProofIters, kzgVerifier.cellProofs)
}
var verificationErr error
start := time.Now()
segments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(sizeHint, cellProofIters)
elapsed := float64(time.Since(start).Milliseconds())
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch_failed").Observe(elapsed)
} else {
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(elapsed)
}
segmentAvailable := verificationErr != nil && len(segments) == len(kzgBatch)
// Send the same result to all verifiers in the batch
for i, verifier := range kzgBatch {
var segment *peerdas.CellProofBundleSegment
if segmentAvailable {
failedSegment := segments[i]
segment = &failedSegment
}
verifier.resChan <- errorWithSegment{
err: verificationErr,
segment: segment,
}
}
}
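verifyKzgBatch relies on peerdas.BatchVerifyDataColumnsCellsKZGProofs returning, when the batch fails, one CellProofBundleSegment per submitted iterator in submission order, so that segments[i] can be handed back to kzgBatch[i] for the unbatched retry. A small sketch of that assumed contract with two hypothetical requests:

// Two requests verified as one batch; on failure each caller retries only its own segment.
iters := []iter.Seq[blocks.CellProofBundle]{
    slices.Values(cellsFromPeerA), // hypothetical inputs already bundled as CellProofBundle
    slices.Values(cellsFromPeerB),
}
segments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(len(cellsFromPeerA)+len(cellsFromPeerB), iters)
if err != nil && len(segments) == len(iters) {
    // segments[0] covers peer A's cells and segments[1] peer B's, assuming the
    // ordering described above, so each can be re-verified independently.
    _ = segments[0].Verify()
    _ = segments[1].Verify()
}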


@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -74,6 +75,10 @@ func (s *Service) processDataColumnSidecarsFromReconstruction(ctx context.Contex
duration := time.Since(startTime)
dataColumnReconstructionHistogram.Observe(float64(duration.Milliseconds()))
if len(reconstructedSidecars) < len(verifiedSidecars) {
log.Error("More verified sidecars than reconstructed sidecars")
return
}
dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))
// Retrieve indices of data column sidecars to sample.
@@ -90,6 +95,36 @@ func (s *Service) processDataColumnSidecarsFromReconstruction(ctx context.Contex
}
if len(unseenIndices) > 0 {
// Publish partial columns for unseen indices.
if broadcaster := s.cfg.p2p.PartialColumnBroadcaster(); broadcaster != nil {
digest, err := s.currentForkDigest()
if err != nil {
log.Error("Failed to get current fork digest")
} else {
err := broadcaster.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
for _, sc := range reconstructedSidecars {
if !unseenIndices[sc.Index] {
continue
}
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sc.Index)
topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
if !yield(topic, blocks.NewPartialDataColumnFromVerifiedRODataColumn(sc)) {
return
}
}
})
if err != nil {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"proposerIndex": proposerIndex,
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).WithError(err).Error("Failed to publish reconstructed partial column")
}
}
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,


@@ -1,8 +1,11 @@
package sync
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -60,3 +63,81 @@ func createInvalidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn
}
return dataColumns
}
func TestVerifierRoutine(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("processes single request", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan errorWithSegment, 1)
service.kzgChan <- &kzgVerifier{sizeHint: 1, cellProofs: blocks.RODataColumnsToCellProofBundles(dataColumns), resChan: resChan}
select {
case errWithSegment := <-resChan:
require.NoError(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
})
t.Run("batches multiple requests", func(t *testing.T) {
ctx := t.Context()
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()
const numRequests = 5
resChans := make([]chan errorWithSegment, numRequests)
for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan errorWithSegment, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{sizeHint: 1, cellProofs: blocks.RODataColumnsToCellProofBundles(dataColumns), resChan: resChan}
}
for i := range numRequests {
select {
case errWithSegment := <-resChans[i]:
require.NoError(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
}
})
t.Run("context cancellation stops routine", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
routineDone := make(chan struct{})
go func() {
service.kzgVerifierRoutine()
close(routineDone)
}()
cancel()
select {
case <-routineDone:
case <-time.After(time.Second):
t.Fatal("timeout waiting for routine to exit")
}
})
}


@@ -256,6 +256,16 @@ var (
Help: "Count the number of data column sidecars obtained via the execution layer.",
},
)
usefulFullColumnsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_useful_full_columns_received_total",
Help: "Number of useful full columns (any cell being useful) received",
}, []string{"column_index"})
partialMessageColumnCompletionsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_column_completions_total",
Help: "How often the partial message first completed the column",
}, []string{"column_index"})
)
func (s *Service) updateMetrics() {


@@ -6,6 +6,8 @@ package sync
import (
"context"
"slices"
"strconv"
"sync"
"time"
@@ -178,6 +180,7 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
@@ -226,6 +229,9 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
// Initialize KZG channel with fixed buffer size of 100.
// This buffer size is designed to handle burst traffic of partial data column cells.
r.kzgChan = make(chan *kzgVerifier, 100)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
@@ -287,6 +293,10 @@ func (s *Service) Start() {
s.newExecutionPayloadEnvelopeVerifier = newPayloadVerifierFromInitializer(v)
go s.verifierRoutine()
go s.kzgVerifierRoutine()
s.startPartialColumnBroadcaster()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()
@@ -318,6 +328,12 @@ func (s *Service) Start() {
}
func (s *Service) startPartialColumnBroadcaster() {
if broadcaster := s.cfg.p2p.PartialColumnBroadcaster(); broadcaster != nil {
go broadcaster.Start(&partialColumnCallbacks{s: s})
}
}
// Stop the regular sync service.
func (s *Service) Stop() error {
defer func() {
@@ -355,6 +371,9 @@ func (s *Service) Stop() error {
for _, t := range s.cfg.p2p.PubSub().GetTopics() {
s.unSubscribeFromTopic(t)
}
if broadcaster := s.cfg.p2p.PartialColumnBroadcaster(); broadcaster != nil {
broadcaster.Stop()
}
return nil
}
@@ -426,6 +445,59 @@ func (s *Service) waitForChainStart() {
s.markForChainStart()
}
type partialColumnCallbacks struct {
s *Service
}
func (c *partialColumnCallbacks) PartialVerifierFromHeader(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, bool, error) {
return c.s.validatePartialDataColumnHeader(c.s.ctx, col)
}
func (c *partialColumnCallbacks) PartialVerifierFromTrustedColumn(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error) {
return c.s.partialVerifierFromTrustedColumn(c.s.ctx, col)
}
func (c *partialColumnCallbacks) ValidateColumn(cellsToVerify []blocks.CellProofBundle) error {
return c.s.validateKZGProofs(c.s.ctx, len(cellsToVerify), slices.Values(cellsToVerify))
}
func (c *partialColumnCallbacks) HandleColumn(topic string, col blocks.VerifiedRODataColumn) {
ctx, cancel := context.WithTimeout(c.s.ctx, pubsubMessageTimeout)
defer cancel()
slot := col.SignedBlockHeader.Header.Slot
proposerIndex := col.SignedBlockHeader.Header.ProposerIndex
if c.s.hasSeenDataColumnIndex(slot, proposerIndex, col.Index) {
return
}
c.s.setSeenDataColumnIndex(slot, proposerIndex, col.Index)
if len(col.KzgCommitments) == 0 {
return
}
// This column was completed from a partial message.
partialMessageColumnCompletionsTotal.WithLabelValues(strconv.FormatUint(col.Index, 10)).Inc()
err := c.s.verifiedRODataColumnSubscriber(ctx, col)
if err != nil {
log.WithError(err).Error("Failed to handle verified RO data column subscriber")
}
}
func (c *partialColumnCallbacks) HandleHeader(header *ethpb.PartialDataColumnHeader, groupID string) {
ctx, cancel := context.WithTimeout(c.s.ctx, pubsubMessageTimeout)
defer cancel()
source, err := peerdas.PopulateFromPartialHeader(header)
if err != nil {
log.WithError(err).Error("Failed to populate from partial data column header")
return
}
log.WithField("slot", source.Slot()).Debug("Received data column header")
err = c.s.processDataColumnSidecarsFromExecution(ctx, source)
if err != nil {
log.WithError(err).Error("Failed to process partial data column header")
}
}
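partialColumnCallbacks is what wires the sync service into the broadcaster started above. The callback interface itself is declared in the partialdatacolumnbroadcaster package and is not shown in this diff; reconstructed from the methods implemented here, it is assumed to look roughly like:

// Assumed shape of the broadcaster's callback interface, inferred from the
// methods above; the actual name and doc comments live in the broadcaster package.
type Callbacks interface {
    PartialVerifierFromHeader(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, bool, error)
    PartialVerifierFromTrustedColumn(col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier, error)
    ValidateColumn(cellsToVerify []blocks.CellProofBundle) error
    HandleColumn(topic string, col blocks.VerifiedRODataColumn)
    HandleHeader(header *ethpb.PartialDataColumnHeader, groupID string)
}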
func (s *Service) startDiscoveryAndSubscriptions() {
// Wait for the chain to start.
s.waitForChainStart()


@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
@@ -61,6 +62,12 @@ type subscribeParameters struct {
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
partial *partialSubscribeParameters
}
type partialSubscribeParameters struct {
broadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
}
// shortTopic is a less verbose version of topic strings used for logging.
@@ -320,6 +327,12 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
// New gossip topic in Fulu.
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
s.spawn(func() {
var ps *partialSubscribeParameters
if broadcaster := s.cfg.p2p.PartialColumnBroadcaster(); broadcaster != nil {
ps = &partialSubscribeParameters{
broadcaster: broadcaster,
}
}
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
@@ -327,6 +340,7 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
nse: nse,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
partial: ps,
})
})
}
@@ -386,11 +400,10 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
}
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest)+s.cfg.p2p.Encoding().ProtocolSuffix(), validator, handle)
}
func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
log := log.WithField("topic", topic)
// Do not resubscribe already seen subscriptions.
@@ -553,7 +566,11 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
for _, subnet := range t.unwanted(wantedSubnets) {
t.cancelSubscription(subnet)
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
topic := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
if t.partial != nil {
_ = t.partial.broadcaster.Unsubscribe(s.ctx, topic)
}
s.unSubscribeFromTopic(topic)
}
}
@@ -600,9 +617,33 @@ func (s *Service) trySubscribeSubnets(t *subnetTracker) {
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneNotWanted(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
topicOpts := make([]pubsub.TopicOpt, 0, 2)
requestPartial := t.partial != nil
if requestPartial {
topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
}
topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
if err != nil {
log.WithError(err).Error("Failed to join topic")
return
}
if requestPartial {
log.Info("Subscribing to partial columns on", topicStr)
err = t.partial.broadcaster.Subscribe(s.ctx, topic)
if err != nil {
log.WithError(err).Error("Failed to subscribe to partial column")
}
}
// We still need to subscribe to the full columns as well as the partial ones
// in case our peers don't support partial messages.
t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
}
}


@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -203,6 +204,10 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "column indices to sample")
}
digest, err := s.currentForkDigest()
if err != nil {
return nil, err
}
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
@@ -233,11 +238,35 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}
// Try to reconstruct data column constructedSidecars from the execution client.
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
partialBroadcaster := s.cfg.p2p.PartialColumnBroadcaster()
isPartialEnabled := partialBroadcaster != nil
constructedSidecars, partialColumns, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
if err != nil {
return nil, errors.Wrap(err, "reconstruct data column sidecars")
}
if isPartialEnabled && len(partialColumns) > 0 {
log.WithField("len(partialColumns)", len(partialColumns)).Debug("Publishing partial columns")
// Publish the partial column. This is idempotent if we republish the same data twice.
// Note, the "partial column" may indeed be complete. We still
// should publish to help our peers.
err = partialBroadcaster.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
for i := range uint64(len(partialColumns)) {
if !columnIndicesToSample[i] {
continue
}
subnet := peerdas.ComputeSubnetForDataColumnSidecar(i)
topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
if !yield(topic, partialColumns[i]) {
return
}
}
})
if err != nil {
log.WithError(err).Warn("Failed to publish partial columns")
}
}
// No sidecars are retrieved from the EL, retry later
constructedCount := uint64(len(constructedSidecars))
@@ -309,7 +338,7 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
}
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars, nil); err != nil {
return nil, errors.Wrap(err, "broadcast data column sidecars")
}


@@ -3,10 +3,12 @@ package sync
import (
"context"
"fmt"
"strconv"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -24,6 +26,30 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
}
// Track useful full columns received via gossip (not previously seen)
slot := sidecar.SignedBlockHeader.Header.Slot
proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
if !s.hasSeenDataColumnIndex(slot, proposerIndex, sidecar.Index) {
usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(sidecar.Index, 10)).Inc()
// re-publish the full column on the partial column extension as we don't send full columns to peers
// who have explicitly requested partial columns. This method is idempotent, so this is fine.
if broadcaster := s.cfg.p2p.PartialColumnBroadcaster(); broadcaster != nil {
digest, err := s.currentForkDigest()
if err != nil {
log.Error("Failed to get current fork digest")
} else {
err := broadcaster.Publish(ctx, func(yield func(string, blocks.PartialDataColumn) bool) {
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
yield(topic, blocks.NewPartialDataColumnFromVerifiedRODataColumn(sidecar))
})
if err != nil {
log.WithError(err).Error("Failed to publish partial column on getting data column sidecar")
}
}
}
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return wrapDataColumnError(sidecar, "receive data column sidecar", err)
}
@@ -57,6 +83,34 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return nil
}
func (s *Service) verifiedRODataColumnSubscriber(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
log.WithField("slot", sidecar.Slot()).WithField("column", sidecar.Index).Debug("Received data column sidecar")
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
}
var wg errgroup.Group
wg.Go(func() error {
if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
return errors.Wrap(err, "process data column sidecars from reconstruction")
}
return nil
})
wg.Go(func() error {
// Broadcast our complete column for peers that don't use partial messages
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{sidecar}, nil); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
}
return nil
})
return wg.Wait()
}
// receiveDataColumnSidecar receives a single data column sidecar: marks it as seen and saves it to the chain.
// Do not loop over this function to receive multiple sidecars, use receiveDataColumnSidecars instead.
func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {


@@ -0,0 +1,124 @@
package sync
import (
"context"
stderrors "errors"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/pkg/errors"
)
var errHeaderEmptyCommitments = errors.New("header has no kzg commitments")
var errHeaderParentNotSeen = errors.New("header parent not seen")
var errHeaderNil = errors.New("nil header")
func (s *Service) partialVerifierFromTrustedColumn(ctx context.Context, col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier,
error) {
if col == nil || col.SignedBlockHeader == nil || col.SignedBlockHeader.Header == nil {
return nil, errHeaderNil
}
if len(col.KzgCommitments) == 0 {
return nil, errHeaderEmptyCommitments
}
roDataColumn, err := blocks.NewRODataColumn(col.DataColumnSidecar)
if err != nil {
return nil, err
}
dcv := s.newColumnsVerifier([]blocks.RODataColumn{roDataColumn}, verification.PartialColumnRequirements)
verifier := verification.NewPartialColumnVerifier(dcv, col)
// mark all header checks as completed
verifier.SatisfyRequirement(verification.RequireNotFromFutureSlot)
verifier.SatisfyRequirement(verification.RequireSlotAboveFinalized)
verifier.SatisfyRequirement(verification.RequireSidecarParentSeen)
verifier.SatisfyRequirement(verification.RequireSidecarParentValid)
verifier.SatisfyRequirement(verification.RequireSidecarParentSlotLower)
verifier.SatisfyRequirement(verification.RequireSidecarDescendsFromFinalized)
verifier.SatisfyRequirement(verification.RequireSidecarInclusionProven)
verifier.SatisfyRequirement(verification.RequireSidecarProposerExpected)
verifier.SatisfyRequirement(verification.RequireValidProposerSignature)
return verifier, nil
}
// validatePartialDataColumn validates only the header-applicable checks for a partial data column.
func (s *Service) validatePartialDataColumnHeader(ctx context.Context, col *blocks.PartialDataColumn) (*verification.PartialColumnVerifier,
bool, error) {
// [IGNORE]
if col == nil || col.SignedBlockHeader == nil || col.SignedBlockHeader.Header == nil {
return nil, false, errHeaderNil
}
// [REJECT] kzg_commitments list is non-empty
if len(col.KzgCommitments) == 0 {
return nil, true, errHeaderEmptyCommitments
}
roDataColumn, err := blocks.NewRODataColumn(col.DataColumnSidecar)
if err != nil {
return nil, false, err
}
dcv := s.newColumnsVerifier([]blocks.RODataColumn{roDataColumn}, verification.PartialColumnRequirements)
verifier := verification.NewPartialColumnVerifier(dcv, col)
// [IGNORE] Not from future slot (with MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
if err := verifier.NotFromFutureSlot(); err != nil {
return verifier, false, err
}
// [IGNORE] Slot above finalized
if err := verifier.SlotAboveFinalized(); err != nil {
return verifier, false, err
}
// [IGNORE] Parent has been seen
parentRoot := bytesutil.ToBytes32(col.SignedBlockHeader.Header.ParentRoot)
if !s.cfg.chain.HasBlock(ctx, parentRoot) {
return verifier, false, errHeaderParentNotSeen
}
if err := verifier.SidecarParentSeen(s.hasBadBlock); err != nil {
return verifier, false, err
}
// [REJECT] Parent passes validation (not a bad block)
if err := verifier.SidecarParentValid(s.hasBadBlock); err != nil {
return verifier, true, err
}
// [REJECT] Header slot > parent slot
if err := verifier.SidecarParentSlotLower(); err != nil {
if stderrors.Is(err, verification.ErrSidecarParentUnknown) {
return verifier, false, err
}
return verifier, true, err
}
// [REJECT] Finalized checkpoint is ancestor (parent is in forkchoice)
if err := verifier.SidecarDescendsFromFinalized(); err != nil {
return verifier, true, err
}
// [REJECT] Inclusion proof valid
if err := verifier.SidecarInclusionProven(); err != nil {
return verifier, true, err
}
// [REJECT] Expected proposer for slot
if err := verifier.SidecarProposerExpected(ctx); err != nil {
return verifier, true, err
}
// [REJECT] Valid proposer signature
if err := verifier.ValidProposerSignature(ctx); err != nil {
return verifier, true, err
}
return verifier, false, nil
}
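validatePartialDataColumnHeader returns the verifier together with a reject flag so the pubsub-facing caller can translate [IGNORE] versus [REJECT] outcomes into gossipsub validation results. That caller is not part of this hunk; a sketch of the assumed translation (the wrapper name is hypothetical):

func (s *Service) validatePartialHeaderGossip(ctx context.Context, col *blocks.PartialDataColumn) (pubsub.ValidationResult, error) {
    verifier, reject, err := s.validatePartialDataColumnHeader(ctx, col)
    if err != nil {
        if reject {
            return pubsub.ValidationReject, err // [REJECT] conditions penalize the sender
        }
        return pubsub.ValidationIgnore, err // [IGNORE] conditions drop the message without penalty
    }
    _ = verifier // header checks passed; cells still need KZG verification before the column is usable
    return pubsub.ValidationAccept, nil
}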


@@ -0,0 +1,298 @@
package sync
import (
"context"
"testing"
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
dbtest "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/pkg/errors"
)
func TestService_PartialVerifierFromTrustedColumn(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
col *blocks.PartialDataColumn
verifier verification.MockDataColumnsVerifier
wantErr error
expectResult bool
verify func(t *testing.T, v *verification.PartialColumnVerifier)
}{
{
name: "nil column",
col: nil,
wantErr: errHeaderNil,
},
{
name: "nil signed header",
col: &blocks.PartialDataColumn{DataColumnSidecar: &ethpb.DataColumnSidecar{}},
wantErr: errHeaderNil,
},
{
name: "empty commitments",
col: buildPartialColumn(t, 0, nil),
wantErr: errHeaderEmptyCommitments,
},
{
name: "marks included cells as verified",
col: buildPartialColumn(t, 2, []uint64{0, 1}),
verifier: verification.MockDataColumnsVerifier{},
expectResult: true,
verify: func(t *testing.T, v *verification.PartialColumnVerifier) {
require.NoError(t, v.SidecarKzgProofVerified())
_, ok, err := v.Complete()
require.NoError(t, err)
require.Equal(t, true, ok)
},
},
{
name: "propagates verifier field errors on completion",
col: buildPartialColumn(t, 1, []uint64{0}),
verifier: verification.MockDataColumnsVerifier{
ErrValidFields: errors.New("invalid fields"),
},
expectResult: true,
verify: func(t *testing.T, v *verification.PartialColumnVerifier) {
_, _, err := v.Complete()
require.ErrorContains(t, "invalid fields", err)
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
service := &Service{
newColumnsVerifier: testNewColumnsVerifier(tc.verifier),
}
got, err := service.partialVerifierFromTrustedColumn(ctx, tc.col)
require.ErrorIs(t, tc.wantErr, err)
require.Equal(t, tc.expectResult, got != nil)
if tc.verify != nil {
tc.verify(t, got)
}
})
}
}
func TestService_ValidatePartialDataColumnHeader(t *testing.T) {
ctx := context.Background()
genericErr := errors.New("generic error")
unavailableParentSlotErr := errors.Wrap(verification.ErrSidecarParentUnknown, "slot lookup failed")
invalidVerifierErr := errors.Wrap(verification.ErrInvalid, "invalid verification")
db := dbtest.SetupDB(t)
// chainWithParent returns a mock chain where HasBlock returns true for the zero parent root.
chainWithParent := func() *mock.ChainService {
return &mock.ChainService{
DB: db,
InitSyncBlockRoots: map[[32]byte]bool{
{}: true, // zero root matches buildPartialColumn's parent root
},
}
}
// chainWithoutParent returns a mock chain where HasBlock returns false.
chainWithoutParent := func() *mock.ChainService {
return &mock.ChainService{DB: db}
}
tests := []struct {
name string
col *blocks.PartialDataColumn
chain *mock.ChainService
verifier verification.MockDataColumnsVerifier
wantErr error
wantReject bool
expectResult bool
}{
{
name: "nil column",
col: nil,
wantErr: errHeaderNil,
wantReject: false,
},
{
name: "empty commitments is reject",
col: buildPartialColumn(t, 0, nil),
wantErr: errHeaderEmptyCommitments,
wantReject: true,
},
{
name: "not from future slot is ignore",
col: buildPartialColumn(t, 1, nil),
verifier: verification.MockDataColumnsVerifier{ErrNotFromFutureSlot: genericErr},
wantErr: genericErr,
wantReject: false,
expectResult: true,
},
{
name: "slot above finalized is ignore",
col: buildPartialColumn(t, 1, nil),
verifier: verification.MockDataColumnsVerifier{ErrSlotAboveFinalized: genericErr},
wantErr: genericErr,
wantReject: false,
expectResult: true,
},
{
name: "parent not seen is ignore",
col: buildPartialColumn(t, 1, nil),
chain: chainWithoutParent(),
wantErr: errHeaderParentNotSeen,
wantReject: false,
expectResult: true,
},
{
name: "parent seen is ignore",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarParentSeen: genericErr},
wantErr: genericErr,
wantReject: false,
expectResult: true,
},
{
name: "parent valid is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarParentValid: genericErr},
wantErr: genericErr,
wantReject: true,
expectResult: true,
},
{
name: "parent slot unavailable is ignore",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarParentSlotLower: unavailableParentSlotErr},
wantErr: unavailableParentSlotErr,
wantReject: false,
expectResult: true,
},
{
name: "parent slot lower invalid is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarParentSlotLower: genericErr},
wantErr: genericErr,
wantReject: true,
expectResult: true,
},
{
name: "proposer expected verification failure is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarProposerExpected: invalidVerifierErr},
wantErr: invalidVerifierErr,
wantReject: true,
expectResult: true,
},
{
name: "proposer expected non verification failure is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrSidecarProposerExpected: genericErr},
wantErr: genericErr,
wantReject: true,
expectResult: true,
},
{
name: "invalid proposer signature is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrValidProposerSignature: verification.ErrInvalidProposerSignature},
wantErr: verification.ErrInvalidProposerSignature,
wantReject: true,
expectResult: true,
},
{
name: "signature infra failure is reject",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{ErrValidProposerSignature: genericErr},
wantErr: genericErr,
wantReject: true,
expectResult: true,
},
{
name: "nominal",
col: buildPartialColumn(t, 1, nil),
chain: chainWithParent(),
verifier: verification.MockDataColumnsVerifier{},
wantErr: nil,
wantReject: false,
expectResult: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
service := &Service{
newColumnsVerifier: testNewColumnsVerifier(tc.verifier),
}
if tc.chain != nil {
service.cfg = &config{chain: tc.chain}
}
got, reject, err := service.validatePartialDataColumnHeader(ctx, tc.col)
require.ErrorIs(t, tc.wantErr, err)
require.Equal(t, tc.wantReject, reject)
require.Equal(t, tc.expectResult, got != nil)
})
}
}
func testNewColumnsVerifier(v verification.MockDataColumnsVerifier) verification.NewDataColumnsVerifier {
return func(cols []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
for _, col := range cols {
v.AppendRODataColumns(col)
}
return &v
}
}
func buildPartialColumn(t *testing.T, nCommitments int, included []uint64) *blocks.PartialDataColumn {
t.Helper()
commitments := make([][]byte, nCommitments)
for i := range nCommitments {
commitments[i] = make([]byte, fieldparams.KzgCommitmentSize)
commitments[i][0] = byte(i + 1)
}
inclusionProof := [][]byte{
make([]byte, 32),
make([]byte, 32),
make([]byte, 32),
make([]byte, 32),
}
col, err := blocks.NewPartialDataColumn(
[fieldparams.RootLength]byte{},
&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
0,
commitments,
inclusionProof,
)
require.NoError(t, err)
for _, idx := range included {
extended := col.ExtendFromVerifiedCell(idx, []byte{byte(idx + 1)}, []byte{byte(idx + 2)})
require.Equal(t, true, extended)
}
return &col
}


@@ -40,6 +40,8 @@ var (
RequireSidecarProposerExpected,
}
PartialColumnRequirements = requirementList(GossipDataColumnSidecarRequirements).excluding(RequireCorrectSubnet)
// ByRangeRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
// via the by range request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
@@ -73,6 +75,46 @@ type LazyHeadStateProvider struct {
var _ HeadStateProvider = &LazyHeadStateProvider{}
// PartialColumnVerifier is used to verify a partial data column before it can be used as a fully verified data column.
// Note: it is not thread-safe; the caller is responsible for ensuring thread-safety.
type PartialColumnVerifier struct {
DataColumnsVerifier
Column *blocks.PartialDataColumn
}
func NewPartialColumnVerifier(dv DataColumnsVerifier, col *blocks.PartialDataColumn) *PartialColumnVerifier {
return &PartialColumnVerifier{
DataColumnsVerifier: dv,
Column: col,
}
}
func (pv *PartialColumnVerifier) Complete() (blocks.VerifiedRODataColumn, bool, error) {
if !pv.Column.IsComplete() {
return blocks.VerifiedRODataColumn{}, false, nil
}
// Now that we have all the cells and proofs, the valid-fields check should pass.
if err := pv.ValidFields(); err != nil {
return blocks.VerifiedRODataColumn{}, false, err
}
pv.SatisfyRequirement(RequireSidecarKzgProofVerified)
cols, err := pv.VerifiedRODataColumns()
if err != nil {
return blocks.VerifiedRODataColumn{}, false, err
}
if len(cols) != 1 {
return blocks.VerifiedRODataColumn{}, false, errors.New("unexpected number of verified data columns")
}
return cols[0], true, nil
}
func (pv *PartialColumnVerifier) ExtendFromVerifiedCell(cellIndex uint64, cell, proof []byte) bool {
return pv.Column.ExtendFromVerifiedCell(cellIndex, cell, proof)
}
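For orientation, here is a minimal sketch (not part of this diff) of how a caller might drive PartialColumnVerifier once individual cells have passed KZG verification; the helper name completeFromVerifiedCells and its placement inside the verification package are illustrative assumptions.

// completeFromVerifiedCells is an illustrative helper: it feeds already-verified
// cells (indices[i] pairs with bundles[i]) into a PartialColumnVerifier and then
// attempts to complete the column.
func completeFromVerifiedCells(pv *PartialColumnVerifier, indices []uint64, bundles []blocks.CellProofBundle) (blocks.VerifiedRODataColumn, bool, error) {
	for i, idx := range indices {
		// ExtendFromVerifiedCell returns false when the cell is already present,
		// which is harmless here, so the result is ignored.
		_ = pv.ExtendFromVerifiedCell(idx, bundles[i].Cell, bundles[i].Proof)
	}
	// Complete succeeds only once every cell is present and ValidFields passes;
	// the boolean result reports whether the column is complete.
	return pv.Complete()
}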
type (
RODataColumnsVerifier struct {
*sharedResources
@@ -395,7 +437,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSlotLower() (err error) {
// Compute the slot of the parent block.
parentSlot, err := dv.fc.Slot(dataColumn.ParentRoot())
if err != nil {
return columnErrBuilder(errors.Wrap(err, "slot"))
return columnErrBuilder(errors.Wrap(ErrSidecarParentUnknown, err.Error()))
}
// Check if the data column slot is after the parent slot.

View File

@@ -2,6 +2,7 @@ package verification
import (
"context"
"fmt"
"reflect"
"testing"
"time"
@@ -815,6 +816,115 @@ func TestDataColumnsSidecarKzgProofVerified(t *testing.T) {
}
}
func TestPartialColumnVerifierComplete(t *testing.T) {
validFieldsErr := errors.New("invalid fields")
testCases := []struct {
wantComplete bool
wantErr error
validFieldsErr error
name string
verifiedCols []blocks.RODataColumn
included []uint64
}{
{
name: "incomplete column",
included: []uint64{0},
wantComplete: false,
},
{
name: "complete happy path",
included: []uint64{0, 1},
verifiedCols: []blocks.RODataColumn{
{},
},
wantComplete: true,
},
{
name: "valid fields failure",
included: []uint64{0, 1},
validFieldsErr: validFieldsErr,
wantComplete: false,
wantErr: validFieldsErr,
},
{
name: "missing verified cell",
included: []uint64{0},
verifiedCols: []blocks.RODataColumn{{}},
wantComplete: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
verifier := &MockDataColumnsVerifier{
ErrValidFields: tc.validFieldsErr,
}
verifier.AppendRODataColumns(tc.verifiedCols...)
pv := NewPartialColumnVerifier(verifier, buildTestPartialColumnForVerifier(t, 2, tc.included))
_, complete, err := pv.Complete()
require.Equal(t, tc.wantComplete, complete)
if tc.wantErr != nil {
fmt.Println("error is", err)
require.ErrorIs(t, err, tc.wantErr)
return
}
require.NoError(t, err)
})
}
}
func TestPartialColumnVerifierExtendFromVerifiedCell(t *testing.T) {
testCases := []struct {
name string
initialIncluded []uint64
cellIndex uint64
cell []byte
proof []byte
wantExtended bool
wantMarked bool
wantCell []byte
wantProof []byte
}{
{
name: "happy path",
initialIncluded: nil,
cellIndex: 1,
cell: []byte{9},
proof: []byte{8},
wantExtended: true,
wantMarked: true,
wantCell: []byte{9},
wantProof: []byte{8},
},
{
name: "already included cell",
initialIncluded: []uint64{1},
cellIndex: 1,
cell: []byte{9},
proof: []byte{8},
wantExtended: false,
wantMarked: true,
wantCell: []byte{2},
wantProof: []byte{3},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
pv := NewPartialColumnVerifier(&MockDataColumnsVerifier{}, buildTestPartialColumnForVerifier(t, 2, tc.initialIncluded))
extended := pv.ExtendFromVerifiedCell(tc.cellIndex, tc.cell, tc.proof)
require.Equal(t, tc.wantExtended, extended)
require.Equal(t, tc.wantMarked, pv.Column.Included.BitAt(tc.cellIndex))
require.Equal(t, true, reflect.DeepEqual(tc.wantCell, pv.Column.Column[tc.cellIndex]))
require.Equal(t, true, reflect.DeepEqual(tc.wantProof, pv.Column.KzgProofs[tc.cellIndex]))
})
}
}
func TestDataColumnsSidecarProposerExpected(t *testing.T) {
const (
columnSlot = 1
@@ -907,6 +1017,46 @@ func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootL
return roDataColumnSidecars
}
func buildTestPartialColumnForVerifier(t *testing.T, nCommitments int, included []uint64) *blocks.PartialDataColumn {
t.Helper()
commitments := make([][]byte, nCommitments)
for i := range nCommitments {
commitments[i] = make([]byte, fieldparams.KzgCommitmentSize)
commitments[i][0] = byte(i + 1)
}
inclusionProof := [][]byte{
make([]byte, fieldparams.RootLength),
make([]byte, fieldparams.RootLength),
make([]byte, fieldparams.RootLength),
make([]byte, fieldparams.RootLength),
}
col, err := blocks.NewPartialDataColumn(
[fieldparams.RootLength]byte{},
&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
0,
commitments,
inclusionProof,
)
require.NoError(t, err)
for _, idx := range included {
extended := col.ExtendFromVerifiedCell(idx, []byte{byte(idx + 1)}, []byte{byte(idx + 2)})
require.Equal(t, true, extended)
}
return &col
}
func TestColumnRequirementSatisfaction(t *testing.T) {
const (
columnSlot = 1

View File

@@ -40,6 +40,8 @@ var (
// errSidecarParentNotSeen means RequireSidecarParentSeen failed.
errSidecarParentNotSeen = errors.New("parent root has not been seen")
// ErrSidecarParentUnknown means that the sidecar parent was not found in the forkchoice.
ErrSidecarParentUnknown = errors.New("parent not found in forkchoice")
// errSidecarParentInvalid means RequireSidecarParentValid failed.
errSidecarParentInvalid = errors.Join(ErrBlobInvalid, errors.New("parent block is not valid"))

View File

@@ -80,11 +80,21 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
// WARNING: The returned verifier is not thread-safe, and should not be used concurrently.
func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier {
return &RODataColumnsVerifier{
sharedResources: ini.shared,
dataColumns: roDataColumns,
results: newResults(reqs...),
verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs,
stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
sharedResources: ini.shared,
dataColumns: roDataColumns,
results: newResults(reqs...),
verifyDataColumnsCommitment: func(rc []blocks.RODataColumn) error {
if len(rc) == 0 {
return nil
}
var sizeHint int
if len(rc) > 0 {
sizeHint = len(rc[0].Column)
}
sizeHint *= len(rc)
return peerdas.VerifyDataColumnsCellsKZGProofs(sizeHint, blocks.RODataColumnsToCellProofBundles(rc))
},
stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
}
}

View File

@@ -0,0 +1,3 @@
### Added
- Create SSZ encoded container for parts metadata.

View File

@@ -0,0 +1,2 @@
### Ignored
- Changing `PartialColumnBroadcaster` to allow callers to cancel requests via context cancellation.

View File

@@ -0,0 +1,2 @@
### Ignored
- Miscellaneous small fixes for the partial columns branches.

View File

@@ -0,0 +1,3 @@
### Added
- Add support for partial columns.

View File

@@ -368,4 +368,14 @@ var (
Usage: "Disables the engine_getBlobsV2 usage.",
Hidden: true,
}
// PartialDataColumns enables cell-level dissemination of partial messages for PeerDAS data columns.
PartialDataColumns = &cli.BoolFlag{
Name: "partial-data-columns",
Usage: "Enable cell-level dissemination for PeerDAS data columns",
}
// BlockProposalEagerPushCells enables eager pushing of all cells when proposing blocks.
BlockProposalEagerPushCells = &cli.BoolFlag{
Name: "block-proposal-eager-push-cells",
Usage: "When proposing a block, eagerly push all cells and proofs to peers in the initial partial message",
}
)

View File

@@ -162,6 +162,8 @@ var appFlags = []cli.Flag{
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.DisableEphemeralLogFile,
flags.PartialDataColumns,
flags.BlockProposalEagerPushCells,
}
func init() {

View File

@@ -75,6 +75,8 @@ var appHelpFlagGroups = []flagGroup{
flags.RPCPort,
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.PartialDataColumns,
flags.BlockProposalEagerPushCells,
},
},
{

View File

@@ -9,6 +9,8 @@ go_library(
"get_payload.go",
"getters.go",
"kzg.go",
"log.go",
"partialdatacolumn.go",
"proofs.go",
"proto.go",
"roblob.go",
@@ -37,8 +39,12 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -51,6 +57,8 @@ go_test(
"factory_test.go",
"getters_test.go",
"kzg_test.go",
"partialdatacolumn_mutation_test.go",
"partialdatacolumn_test.go",
"proofs_test.go",
"proto_test.go",
"roblob_test.go",
@@ -76,6 +84,9 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],

View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package blocks
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "consensus-types/blocks")

View File

@@ -0,0 +1,476 @@
package blocks
import (
"bytes"
"iter"
"slices"
"github.com/OffchainLabs/go-bitfield"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// CellProofBundle contains a cell, its proof, and the corresponding
// commitment/index information.
type CellProofBundle struct {
ColumnIndex uint64
Commitment []byte
Cell []byte
Proof []byte
}
// PartialDataColumnPeerState tracks, for a single peer, the parts metadata we have
// sent to that peer and the parts metadata we have received from it.
type PartialDataColumnPeerState struct {
Sent *ethpb.PartialDataColumnPartsMetadata
Recvd *ethpb.PartialDataColumnPartsMetadata
}
// PartialDataColumn is a partially populated DataColumnSidecar used for
// exchanging cells with peers.
type PartialDataColumn struct {
*ethpb.DataColumnSidecar
root [fieldparams.RootLength]byte
groupID []byte
Included bitfield.Bitlist
// Published is set to true once the node itself has published this column. We only want
// to republish in response to an incoming RPC after we have published the column
// ourselves, as that is the point at which we know which cells we have or are missing.
Published bool
// byBlockProposer indicates this column was created by the block proposer.
// When true, the eager push will include all available cells and proofs
// rather than just the header.
byBlockProposer bool
}
// PartialDataColumnOption is a functional option for NewPartialDataColumn.
type PartialDataColumnOption func(*PartialDataColumn)
// WithByBlockProposer marks the partial data column as created by the block
// proposer, causing eager pushes to include all available cells and proofs.
func WithByBlockProposer() PartialDataColumnOption {
return func(p *PartialDataColumn) {
p.byBlockProposer = true
}
}
// NewPartialDataColumnFromVerifiedRODataColumn wraps an already verified sidecar as a
// PartialDataColumn with every cell marked as included.
func NewPartialDataColumnFromVerifiedRODataColumn(c VerifiedRODataColumn) PartialDataColumn {
included := bitfield.NewBitlist(uint64(len(c.KzgCommitments)))
included = included.Not()
return PartialDataColumn{
DataColumnSidecar: c.DataColumnSidecar,
root: c.root,
Included: included,
groupID: groupIdFromRoot(c.root),
}
}
func groupIdFromRoot(root [fieldparams.RootLength]byte) []byte {
groupID := make([]byte, len(root)+1)
copy(groupID[1:], root[:])
// Version 0
groupID[0] = 0
return groupID
}
// NewPartialDataColumn creates a new Partial Data Column for the given block.
// It does not validate the inputs. The caller is responsible for validating the
// block header and KZG Commitment Inclusion proof.
func NewPartialDataColumn(
root [fieldparams.RootLength]byte,
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
columnIndex uint64,
kzgCommitments [][]byte,
kzgInclusionProof [][]byte,
opts ...PartialDataColumnOption,
) (PartialDataColumn, error) {
if signedBlockHeader == nil {
return PartialDataColumn{}, errors.New("signedBlockHeader is nil")
}
sidecar := &ethpb.DataColumnSidecar{
Index: columnIndex,
KzgCommitments: kzgCommitments,
Column: make([][]byte, len(kzgCommitments)),
KzgProofs: make([][]byte, len(kzgCommitments)),
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgInclusionProof,
}
c := PartialDataColumn{
DataColumnSidecar: sidecar,
root: root,
groupID: groupIdFromRoot(root),
Included: bitfield.NewBitlist(uint64(len(sidecar.KzgCommitments))),
}
for _, opt := range opts {
opt(&c)
}
return c, nil
}
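As a hedged illustration (not part of this diff), a proposer-side caller might construct its partial columns with the WithByBlockProposer option so that the first eager push carries every available cell; the wrapper name newProposerColumn is an assumption made for the example.

// newProposerColumn is an illustrative wrapper around NewPartialDataColumn for the
// block-proposer path: the WithByBlockProposer option makes the initial eager push
// include all available cells and proofs rather than just the header.
func newProposerColumn(root [fieldparams.RootLength]byte, hdr *ethpb.SignedBeaconBlockHeader, idx uint64, commitments, inclusionProof [][]byte) (PartialDataColumn, error) {
	return NewPartialDataColumn(root, hdr, idx, commitments, inclusionProof, WithByBlockProposer())
}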
// GroupID returns the libp2p partial-messages group identifier.
func (p *PartialDataColumn) GroupID() []byte {
return p.groupID
}
func (p *PartialDataColumn) newPartsMetadata() *ethpb.PartialDataColumnPartsMetadata {
n := uint64(len(p.KzgCommitments))
available := slices.Clone(p.Included)
requests := bitfield.NewBitlist(n)
requests = requests.Not()
return &ethpb.PartialDataColumnPartsMetadata{
Available: available,
Requests: requests,
}
}
// NewPartsMetaWithNoAvailableAndNoRequests creates metadata for n parts where
// no parts are marked as available and no requests are set.
func NewPartsMetaWithNoAvailableAndNoRequests(n uint64) *ethpb.PartialDataColumnPartsMetadata {
return &ethpb.PartialDataColumnPartsMetadata{
Available: bitfield.NewBitlist(n),
Requests: bitfield.NewBitlist(n),
}
}
func marshalPartsMetadata(meta *ethpb.PartialDataColumnPartsMetadata) (partialmessages.PartsMetadata, error) {
b, err := meta.MarshalSSZ()
if err != nil {
return nil, err
}
return partialmessages.PartsMetadata(b), nil
}
// ClonePeerState creates a deep copy of the given PartialDataColumnPeerState. It clones the
// Sent and Recvd parts metadata, ensuring that modifications to the returned state do not
// affect the original.
func ClonePeerState(peerState PartialDataColumnPeerState) PartialDataColumnPeerState {
clonePartsMetadataF := func(meta *ethpb.PartialDataColumnPartsMetadata) *ethpb.PartialDataColumnPartsMetadata {
if meta == nil {
return nil
}
return &ethpb.PartialDataColumnPartsMetadata{
Available: slices.Clone(meta.Available),
Requests: slices.Clone(meta.Requests),
}
}
var nextPeerState PartialDataColumnPeerState
nextPeerState.Sent = clonePartsMetadataF(peerState.Sent)
nextPeerState.Recvd = clonePartsMetadataF(peerState.Recvd)
return nextPeerState
}
// NKzgCommitments returns the number of KZG commitments in the block header for this column,
// which in turn equals the number of cells in the column.
func (p *PartialDataColumn) NKzgCommitments() uint64 {
return p.Included.Len()
}
func (p *PartialDataColumn) cellsToSendForPeer(peerMeta *ethpb.PartialDataColumnPartsMetadata) (encodedMsg []byte, cellsSent bitfield.Bitlist, err error) {
// We have it and the peer requested it.
meetsRequests, err := peerMeta.Requests.And(p.Included)
if err != nil {
return nil, nil, errors.Wrap(err, "peer metadata bitmap length mismatch - requests")
}
// Even though the bitmaps allow a peer to request cells it already has, we filter those out to save bandwidth.
meetsNeeds, err := meetsRequests.And(peerMeta.Available.Not())
if err != nil {
return nil, nil, errors.Wrap(err, "peer metadata bitmap length mismatch - available")
}
size := meetsNeeds.Len()
// Nothing to send
if meetsNeeds.Count() == 0 {
return nil, nil, nil
}
nCells := meetsNeeds.Count()
out := ethpb.PartialDataColumnSidecar{
PartialColumn: make([][]byte, 0, nCells),
KzgProofs: make([][]byte, 0, nCells),
CellsPresentBitmap: meetsNeeds,
}
for i := range size {
if meetsNeeds.BitAt(i) {
out.PartialColumn = append(out.PartialColumn, p.Column[i])
out.KzgProofs = append(out.KzgProofs, p.KzgProofs[i])
}
}
marshalled, err := out.MarshalSSZ()
if err != nil {
return nil, nil, err
}
return marshalled, meetsNeeds, nil
}
// eagerPushBytes builds the SSZ-encoded PartialDataColumnSidecar for the initial eager push.
// When includeCellAndProofs is true (the block-proposer path), all available cells and proofs
// are included; otherwise only the header is sent (no cells).
func (p *PartialDataColumn) eagerPushBytes(remote peer.ID, includeCellAndProofs bool, includeHeader bool) (encoded []byte, err error) {
log.WithFields(logrus.Fields{
"peer": remote,
"index": p.Index,
"includeHeader": includeHeader,
"includeCellAndProofs": includeCellAndProofs,
}).Debug("Eager push")
if !includeHeader && (!includeCellAndProofs || p.Included.Count() == 0) {
return nil, nil
}
outMessage := &ethpb.PartialDataColumnSidecar{}
if includeHeader {
outMessage.Header = []*ethpb.PartialDataColumnHeader{{
KzgCommitments: p.KzgCommitments,
SignedBlockHeader: p.SignedBlockHeader,
KzgCommitmentsInclusionProof: p.KzgCommitmentsInclusionProof,
}}
}
if !includeCellAndProofs {
outMessage.CellsPresentBitmap = bitfield.NewBitlist(uint64(len(p.KzgCommitments)))
encoded, err = outMessage.MarshalSSZ()
return encoded, err
}
nCells := p.Included.Count()
outMessage.CellsPresentBitmap = slices.Clone(p.Included)
outMessage.PartialColumn = make([][]byte, 0, nCells)
outMessage.KzgProofs = make([][]byte, 0, nCells)
for i := range p.Included.Len() {
if p.Included.BitAt(i) {
outMessage.PartialColumn = append(outMessage.PartialColumn, p.Column[i])
outMessage.KzgProofs = append(outMessage.KzgProofs, p.KzgProofs[i])
}
}
encoded, err = outMessage.MarshalSSZ()
return encoded, err
}
// PartsMetadata returns SSZ-encoded PartialDataColumnPartsMetadata.
func (p *PartialDataColumn) PartsMetadata() (partialmessages.PartsMetadata, error) {
meta := p.newPartsMetadata()
return marshalPartsMetadata(meta)
}
// MergeAvailableIntoPartsMetadata merges additional available cells into the base parts metadata's available set.
func MergeAvailableIntoPartsMetadata(base *ethpb.PartialDataColumnPartsMetadata, additionalAvailable bitfield.Bitlist) (*ethpb.PartialDataColumnPartsMetadata, error) {
if base == nil {
return nil, errors.New("base is nil")
}
if base.Requests.Len() != additionalAvailable.Len() {
return nil, errors.New("requests length mismatch")
}
merged, err := base.Available.Or(additionalAvailable)
if err != nil {
return nil, err
}
base.Available = merged
return base, nil
}
func (p *PartialDataColumn) PublishActionsFn(headerSentCache map[peer.ID]bool) partialmessages.PublishActionsFn[PartialDataColumnPeerState] {
return func(peerStates map[peer.ID]PartialDataColumnPeerState, peerRequestsPartial func(peer.ID) bool) iter.Seq2[peer.ID, partialmessages.PublishAction] {
return func(yield func(peer.ID, partialmessages.PublishAction) bool) {
for peer, peerState := range peerStates {
nextState, action, includeHeader := p.forPeer(peer, peerRequestsPartial(peer), peerState, !headerSentCache[peer])
if action.Err == nil {
v := headerSentCache[peer]
headerSentCache[peer] = headerSentCache[peer] || includeHeader
if v != headerSentCache[peer] {
log.WithFields(logrus.Fields{
"peer": peer,
"index": p.Index,
"includeHeader": includeHeader,
"headerSentCache": headerSentCache[peer],
}).Debug("Header sent cache updated")
}
// Only update state if there was no error.
peerStates[peer] = nextState
}
if !yield(peer, action) {
return
}
}
}
}
}
// forPeer returns the next peer state and the publish action for this peer.
func (p *PartialDataColumn) forPeer(remote peer.ID, requestedMessage bool, peerState PartialDataColumnPeerState, includeHeader bool) (PartialDataColumnPeerState, partialmessages.PublishAction, bool) {
peerState = ClonePeerState(peerState)
// Eager push - we don't know what the peer has and message has been requested.
// Set RecvdState so subsequent calls skip the eager push path.
if requestedMessage && peerState.Recvd == nil {
encoded, err := p.eagerPushBytes(remote, p.byBlockProposer, includeHeader)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
myPartsMeta := p.newPartsMetadata()
if p.byBlockProposer {
log.WithFields(logrus.Fields{
"peer": remote,
"column": p.Index,
"cellCount": p.Included.Count(),
}).Debug("Eager pushing cells to peer (block proposer)")
peerState.Recvd = &ethpb.PartialDataColumnPartsMetadata{
Available: slices.Clone(p.Included),
Requests: bitfield.NewBitlist(p.NKzgCommitments()),
}
} else {
peerState.Recvd = NewPartsMetaWithNoAvailableAndNoRequests(p.NKzgCommitments())
}
// Either way, we're sending our parts metadata, so update the sent state, i.e. the peer's view of what we have.
peerState.Sent = myPartsMeta
encodedMeta, err := marshalPartsMetadata(myPartsMeta)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
return peerState, partialmessages.PublishAction{
EncodedPartialMessage: encoded,
EncodedPartsMetadata: encodedMeta,
}, includeHeader
}
var cellsSent bitfield.Bitlist
sentMeta := peerState.Sent
recvdMeta := peerState.Recvd
var encodedMsg []byte
// Normal - message requested and we have RecvdState.
if requestedMessage && recvdMeta != nil {
var err error
encodedMsg, cellsSent, err = p.cellsToSendForPeer(recvdMeta)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
if cellsSent != nil && cellsSent.Count() != 0 {
newRecvd, err := MergeAvailableIntoPartsMetadata(recvdMeta, cellsSent)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
peerState.Recvd = newRecvd
}
}
// Check if we need to send partsMetadata.
var partsMetadataToSend partialmessages.PartsMetadata
myPartsMeta := p.newPartsMetadata()
var shouldSendPartsMetadata bool
if sentMeta != nil {
if !bytes.Equal(sentMeta.Requests, myPartsMeta.Requests) {
shouldSendPartsMetadata = true
} else {
contains, err := sentMeta.Available.Contains(myPartsMeta.Available)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
shouldSendPartsMetadata = !contains
}
}
if sentMeta == nil || shouldSendPartsMetadata {
var err error
partsMetadataToSend, err = marshalPartsMetadata(myPartsMeta)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
if sentMeta == nil {
peerState.Sent = myPartsMeta
} else {
sentMeta, err = MergeAvailableIntoPartsMetadata(sentMeta, myPartsMeta.Available)
if err != nil {
return peerState, partialmessages.PublishAction{Err: err}, false
}
sentMeta.Requests = myPartsMeta.Requests
peerState.Sent = sentMeta
}
}
return peerState, partialmessages.PublishAction{
EncodedPartialMessage: encodedMsg,
EncodedPartsMetadata: partsMetadataToSend,
}, false
}
// CellsToVerifyFromPartialMessage returns cells from the partial message that need to be verified.
func (p *PartialDataColumn) CellsToVerifyFromPartialMessage(message *ethpb.PartialDataColumnSidecar) ([]uint64, []CellProofBundle, error) {
included := message.CellsPresentBitmap
if included.Len() == 0 {
return nil, nil, nil
}
// Some basic sanity checks
includedCells := included.Count()
if uint64(len(message.KzgProofs)) != includedCells {
return nil, nil, errors.New("invalid message. Missing KZG proofs")
}
if uint64(len(message.PartialColumn)) != includedCells {
return nil, nil, errors.New("invalid message. Missing cells")
}
ourIncludedList := p.Included
if included.Len() != ourIncludedList.Len() {
return nil, nil, errors.New("invalid message: wrong bitmap length")
}
cellIndices := make([]uint64, 0, includedCells)
cellsToVerify := make([]CellProofBundle, 0, includedCells)
// Filter out cells we already have.
// j tracks position in the compact PartialColumn/KzgProofs arrays.
var j int
for i := range included.Len() {
if !included.BitAt(i) {
continue
}
if j >= len(message.PartialColumn) {
break
}
if !ourIncludedList.BitAt(i) {
cellIndices = append(cellIndices, i)
cellsToVerify = append(cellsToVerify, CellProofBundle{
ColumnIndex: p.Index,
Cell: message.PartialColumn[j],
Proof: message.KzgProofs[j],
// Use the commitment from our datacolumn, indexed by i since we
// have all commitments.
Commitment: p.KzgCommitments[i],
})
}
j++
}
return cellIndices, cellsToVerify, nil
}
// ExtendFromVerifiedCell extends this partial column with one verified cell.
func (p *PartialDataColumn) ExtendFromVerifiedCell(cellIndex uint64, cell, proof []byte) bool {
if p.Included.BitAt(cellIndex) {
// We already have this cell
return false
}
p.Included.SetBitAt(cellIndex, true)
p.Column[cellIndex] = cell
p.KzgProofs[cellIndex] = proof
return true
}
// IsComplete returns true if all cells are now present in this column.
func (p *PartialDataColumn) IsComplete() bool {
return uint64(len(p.KzgCommitments)) == p.Included.Count()
}
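To tie the pieces above together, here is an illustrative flow (not part of this diff) for applying an incoming PartialDataColumnSidecar to a PartialDataColumn; the verifyCell callback stands in for the caller's KZG verification step and is an assumption of the sketch.

// applyPartialMessage is an illustrative helper: it extracts the cells we are
// missing from an incoming partial message, verifies each one via the supplied
// callback, folds them into the column, and reports whether it is now complete.
func applyPartialMessage(p *PartialDataColumn, msg *ethpb.PartialDataColumnSidecar, verifyCell func(CellProofBundle) error) (bool, error) {
	indices, bundles, err := p.CellsToVerifyFromPartialMessage(msg)
	if err != nil {
		return false, err
	}
	for i, b := range bundles {
		if err := verifyCell(b); err != nil {
			return false, err
		}
		// A false return only means the cell was already present, so it is ignored.
		_ = p.ExtendFromVerifiedCell(indices[i], b.Cell, b.Proof)
	}
	return p.IsComplete(), nil
}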

View File

@@ -0,0 +1,63 @@
package blocks
import (
"testing"
"github.com/OffchainLabs/go-bitfield"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// TestCellsToVerifyFromPartialMessage_DoesNotMutateInput verifies that
// CellsToVerifyFromPartialMessage does not modify the caller's message slices.
// An earlier implementation re-sliced message.PartialColumn and
// message.KzgProofs in place, silently truncating the input.
func TestCellsToVerifyFromPartialMessage_DoesNotMutateInput(t *testing.T) {
nCommitments := uint64(3)
col, err := NewPartialDataColumn(
[fieldparams.RootLength]byte{},
&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
0,
make([][]byte, nCommitments), // 3 commitments
nil,
)
require.NoError(t, err)
// The partial column has cell 0 already; cells 1 and 2 are missing.
col.ExtendFromVerifiedCell(0, []byte{0x01}, []byte{0x02})
// Build a message that offers cells 1 and 2.
bitmap := bitfield.NewBitlist(nCommitments)
bitmap.SetBitAt(1, true)
bitmap.SetBitAt(2, true)
msg := &ethpb.PartialDataColumnSidecar{
CellsPresentBitmap: bitmap,
PartialColumn: [][]byte{{0x10}, {0x20}},
KzgProofs: [][]byte{{0xA0}, {0xB0}},
}
origColumnLen := len(msg.PartialColumn)
origProofsLen := len(msg.KzgProofs)
_, _, err = col.CellsToVerifyFromPartialMessage(msg)
require.NoError(t, err)
// Assert that the message's slices were NOT modified.
if len(msg.PartialColumn) != origColumnLen {
t.Errorf("CellsToVerifyFromPartialMessage mutated message.PartialColumn: len went from %d to %d",
origColumnLen, len(msg.PartialColumn))
}
if len(msg.KzgProofs) != origProofsLen {
t.Errorf("CellsToVerifyFromPartialMessage mutated message.KzgProofs: len went from %d to %d",
origProofsLen, len(msg.KzgProofs))
}
}

File diff suppressed because it is too large.

View File

@@ -1,6 +1,8 @@
package blocks
import (
"iter"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -85,3 +87,20 @@ type VerifiedRODataColumn struct {
func NewVerifiedRODataColumn(roDataColumn RODataColumn) VerifiedRODataColumn {
return VerifiedRODataColumn{RODataColumn: roDataColumn}
}
func RODataColumnsToCellProofBundles(sidecars []RODataColumn) iter.Seq[CellProofBundle] {
return func(yield func(CellProofBundle) bool) {
for _, sidecar := range sidecars {
for i := range sidecar.Column {
if !yield(CellProofBundle{
ColumnIndex: sidecar.Index,
Commitment: sidecar.KzgCommitments[i],
Cell: sidecar.Column[i],
Proof: sidecar.KzgProofs[i],
}) {
return
}
}
}
}
}
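Since RODataColumnsToCellProofBundles returns a Go 1.23 iter.Seq, callers can range over it directly; the following small example (not part of this diff, with the function name countCells chosen only for illustration) shows the intended consumption pattern.

// countCells is an illustrative consumer of the iterator above: it walks every
// (commitment, cell, proof) bundle across a batch of sidecars with a plain
// range-over-func loop and counts them.
func countCells(sidecars []RODataColumn) int {
	n := 0
	for bundle := range RODataColumnsToCellProofBundles(sidecars) {
		_ = bundle // each bundle carries the column index, commitment, cell and proof
		n++
	}
	return n
}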

196
deps.bzl
View File

@@ -76,12 +76,6 @@ def prysm_deps():
sum = "h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=",
version = "v1.2.1-0.20190218064605-e24eb225f156",
)
go_repository(
name = "com_github_andreasbriese_bbloom",
importpath = "github.com/AndreasBriese/bbloom",
sum = "h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=",
version = "v0.0.0-20190825152654-46b345b51c96",
)
go_repository(
name = "com_github_andybalholm_brotli",
importpath = "github.com/andybalholm/brotli",
@@ -376,12 +370,6 @@ def prysm_deps():
sum = "h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=",
version = "v1.1.1",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cespare_xxhash_v2",
importpath = "github.com/cespare/xxhash/v2",
@@ -406,12 +394,6 @@ def prysm_deps():
sum = "h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=",
version = "v1.0.0",
)
go_repository(
name = "com_github_cilium_ebpf",
importpath = "github.com/cilium/ebpf",
sum = "h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_clbanning_x2j",
importpath = "github.com/clbanning/x2j",
@@ -644,18 +626,6 @@ def prysm_deps():
sum = "h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=",
version = "v1.8.2",
)
go_repository(
name = "com_github_dgraph_io_badger",
importpath = "github.com/dgraph-io/badger",
sum = "h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=",
version = "v1.6.2",
)
go_repository(
name = "com_github_dgraph_io_ristretto",
importpath = "github.com/dgraph-io/ristretto",
sum = "h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=",
version = "v0.0.2",
)
go_repository(
name = "com_github_dgraph_io_ristretto_v2",
importpath = "github.com/dgraph-io/ristretto/v2",
@@ -1634,20 +1604,8 @@ def prysm_deps():
go_repository(
name = "com_github_ipfs_go_datastore",
importpath = "github.com/ipfs/go-datastore",
sum = "h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=",
version = "v0.6.0",
)
go_repository(
name = "com_github_ipfs_go_ds_badger",
importpath = "github.com/ipfs/go-ds-badger",
sum = "h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=",
version = "v0.3.0",
)
go_repository(
name = "com_github_ipfs_go_ds_leveldb",
importpath = "github.com/ipfs/go-ds-leveldb",
sum = "h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo=",
version = "v0.5.0",
sum = "h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U=",
version = "v0.8.2",
)
go_repository(
name = "com_github_ipfs_go_log_v2",
@@ -1674,12 +1632,6 @@ def prysm_deps():
sum = "h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=",
version = "v0.1.0",
)
go_repository(
name = "com_github_jbenet_goprocess",
importpath = "github.com/jbenet/goprocess",
sum = "h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=",
version = "v0.1.4",
)
go_repository(
name = "com_github_jcmturner_gofork",
importpath = "github.com/jcmturner/gofork",
@@ -1852,8 +1804,8 @@ def prysm_deps():
go_repository(
name = "com_github_klauspost_cpuid_v2",
importpath = "github.com/klauspost/cpuid/v2",
sum = "h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=",
version = "v2.2.9",
sum = "h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=",
version = "v2.2.10",
)
go_repository(
name = "com_github_klauspost_reedsolomon",
@@ -1876,8 +1828,8 @@ def prysm_deps():
go_repository(
name = "com_github_koron_go_ssdp",
importpath = "github.com/koron/go-ssdp",
sum = "h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=",
version = "v0.0.5",
sum = "h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=",
version = "v0.0.6",
)
go_repository(
name = "com_github_kr_fs",
@@ -1958,8 +1910,8 @@ def prysm_deps():
],
build_file_proto_mode = "disable_global",
importpath = "github.com/libp2p/go-libp2p",
sum = "h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE=",
version = "v0.39.1",
sum = "h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs=",
version = "v0.44.0",
)
go_repository(
name = "com_github_libp2p_go_libp2p_asn_util",
@@ -1970,15 +1922,15 @@ def prysm_deps():
go_repository(
name = "com_github_libp2p_go_libp2p_mplex",
importpath = "github.com/libp2p/go-libp2p-mplex",
sum = "h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8=",
version = "v0.9.0",
sum = "h1:0vwpLXRSfkTzshEjETIEgJaVxXvg+orbxYoIb3Ty5qM=",
version = "v0.11.0",
)
go_repository(
name = "com_github_libp2p_go_libp2p_pubsub",
build_file_proto_mode = "disable_global",
importpath = "github.com/libp2p/go-libp2p-pubsub",
sum = "h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=",
version = "v0.14.2",
sum = "h1:Fy44dIDY1aBEJVIvXdT5A1VUU0tKEo95sSicWFlZIfU=",
version = "v0.15.1-0.20260304214010-30edef80aae7",
)
go_repository(
name = "com_github_libp2p_go_libp2p_testing",
@@ -2022,6 +1974,12 @@ def prysm_deps():
sum = "h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=",
version = "v4.0.2",
)
go_repository(
name = "com_github_libp2p_go_yamux_v5",
importpath = "github.com/libp2p/go-yamux/v5",
sum = "h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=",
version = "v5.0.1",
)
go_repository(
name = "com_github_libp2p_zeroconf_v2",
importpath = "github.com/libp2p/zeroconf/v2",
@@ -2082,6 +2040,12 @@ def prysm_deps():
sum = "h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4=",
version = "v0.7.0",
)
go_repository(
name = "com_github_marcopolo_simnet",
importpath = "github.com/marcopolo/simnet",
sum = "h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=",
version = "v0.0.1",
)
go_repository(
name = "com_github_marten_seemann_tcp",
importpath = "github.com/marten-seemann/tcp",
@@ -2139,8 +2103,8 @@ def prysm_deps():
go_repository(
name = "com_github_miekg_dns",
importpath = "github.com/miekg/dns",
sum = "h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=",
version = "v1.1.63",
sum = "h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=",
version = "v1.1.66",
)
go_repository(
name = "com_github_mikioh_tcp",
@@ -2277,8 +2241,8 @@ def prysm_deps():
go_repository(
name = "com_github_multiformats_go_multiaddr",
importpath = "github.com/multiformats/go-multiaddr",
sum = "h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=",
version = "v0.14.0",
sum = "h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=",
version = "v0.16.0",
)
go_repository(
name = "com_github_multiformats_go_multiaddr_dns",
@@ -2304,8 +2268,8 @@ def prysm_deps():
"gazelle:exclude gen.go",
],
importpath = "github.com/multiformats/go-multicodec",
sum = "h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=",
version = "v0.9.0",
sum = "h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=",
version = "v0.9.1",
)
go_repository(
name = "com_github_multiformats_go_multihash",
@@ -2316,8 +2280,8 @@ def prysm_deps():
go_repository(
name = "com_github_multiformats_go_multistream",
importpath = "github.com/multiformats/go-multistream",
sum = "h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=",
version = "v0.6.0",
sum = "h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=",
version = "v0.6.1",
)
go_repository(
name = "com_github_multiformats_go_varint",
@@ -2400,8 +2364,8 @@ def prysm_deps():
go_repository(
name = "com_github_nxadm_tail",
importpath = "github.com/nxadm/tail",
sum = "h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=",
version = "v1.4.11",
sum = "h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=",
version = "v1.4.4",
)
go_repository(
name = "com_github_offchainlabs_hashtree",
@@ -2613,8 +2577,8 @@ def prysm_deps():
go_repository(
name = "com_github_pion_dtls_v3",
importpath = "github.com/pion/dtls/v3",
sum = "h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=",
version = "v3.0.4",
sum = "h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=",
version = "v3.0.6",
)
go_repository(
name = "com_github_pion_ice_v2",
@@ -2625,14 +2589,14 @@ def prysm_deps():
go_repository(
name = "com_github_pion_ice_v4",
importpath = "github.com/pion/ice/v4",
sum = "h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM=",
version = "v4.0.6",
sum = "h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=",
version = "v4.0.10",
)
go_repository(
name = "com_github_pion_interceptor",
importpath = "github.com/pion/interceptor",
sum = "h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=",
version = "v0.1.37",
sum = "h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=",
version = "v0.1.40",
)
go_repository(
name = "com_github_pion_logging",
@@ -2667,26 +2631,26 @@ def prysm_deps():
go_repository(
name = "com_github_pion_rtp",
importpath = "github.com/pion/rtp",
sum = "h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=",
version = "v1.8.11",
sum = "h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=",
version = "v1.8.19",
)
go_repository(
name = "com_github_pion_sctp",
importpath = "github.com/pion/sctp",
sum = "h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=",
version = "v1.8.35",
sum = "h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=",
version = "v1.8.39",
)
go_repository(
name = "com_github_pion_sdp_v3",
importpath = "github.com/pion/sdp/v3",
sum = "h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=",
version = "v3.0.10",
sum = "h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=",
version = "v3.0.13",
)
go_repository(
name = "com_github_pion_srtp_v3",
importpath = "github.com/pion/srtp/v3",
sum = "h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=",
version = "v3.0.4",
sum = "h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=",
version = "v3.0.6",
)
go_repository(
name = "com_github_pion_stun",
@@ -2727,14 +2691,14 @@ def prysm_deps():
go_repository(
name = "com_github_pion_turn_v4",
importpath = "github.com/pion/turn/v4",
sum = "h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=",
version = "v4.0.0",
sum = "h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=",
version = "v4.0.2",
)
go_repository(
name = "com_github_pion_webrtc_v4",
importpath = "github.com/pion/webrtc/v4",
sum = "h1:T1ZmnT9qxIJIt4d8XoiMOBrTClGHDDXNg9e/fh018Qc=",
version = "v4.0.8",
sum = "h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=",
version = "v4.1.2",
)
go_repository(
name = "com_github_pkg_diff",
@@ -2793,26 +2757,26 @@ def prysm_deps():
go_repository(
name = "com_github_prometheus_client_golang",
importpath = "github.com/prometheus/client_golang",
sum = "h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=",
version = "v1.20.5",
sum = "h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=",
version = "v1.22.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=",
version = "v0.6.1",
sum = "h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=",
version = "v0.6.2",
)
go_repository(
name = "com_github_prometheus_common",
importpath = "github.com/prometheus/common",
sum = "h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=",
version = "v0.62.0",
sum = "h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=",
version = "v0.64.0",
)
go_repository(
name = "com_github_prometheus_procfs",
importpath = "github.com/prometheus/procfs",
sum = "h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=",
version = "v0.15.1",
sum = "h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=",
version = "v0.16.1",
)
go_repository(
name = "com_github_prometheus_prom2json",
@@ -2884,14 +2848,14 @@ def prysm_deps():
"gazelle:exclude tools.go",
],
importpath = "github.com/quic-go/quic-go",
sum = "h1:x09Agz4ATTMEP3qb5P0MRxNZfd6O9wAyK3qwwqQZVQc=",
version = "v0.49.1-0.20250925085836-275c172fec2b",
sum = "h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=",
version = "v0.55.0",
)
go_repository(
name = "com_github_quic_go_webtransport_go",
importpath = "github.com/quic-go/webtransport-go",
sum = "h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=",
version = "v0.8.1-0.20241018022711-4ac2c9250e66",
sum = "h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=",
version = "v0.9.0",
)
go_repository(
name = "com_github_r3labs_sse_v2",
@@ -3377,8 +3341,8 @@ def prysm_deps():
go_repository(
name = "com_github_urfave_cli",
importpath = "github.com/urfave/cli",
sum = "h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=",
version = "v1.22.2",
sum = "h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=",
version = "v1.22.1",
)
go_repository(
name = "com_github_urfave_cli_v2",
@@ -4298,8 +4262,8 @@ def prysm_deps():
go_repository(
name = "com_lukechampine_blake3",
importpath = "lukechampine.com/blake3",
sum = "h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE=",
version = "v1.3.0",
sum = "h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=",
version = "v1.4.1",
)
go_repository(
name = "com_shuralyov_dmitri_app_changes",
@@ -4723,8 +4687,8 @@ def prysm_deps():
go_repository(
name = "org_golang_google_protobuf",
importpath = "google.golang.org/protobuf",
sum = "h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=",
version = "v1.36.5",
sum = "h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=",
version = "v1.36.6",
)
go_repository(
name = "org_golang_x_build",
@@ -4741,8 +4705,8 @@ def prysm_deps():
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=",
version = "v0.0.0-20250506013437-ce4c2cf36ca6",
sum = "h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=",
version = "v0.0.0-20250606033433-dcc06ee1d476",
)
go_repository(
name = "org_golang_x_exp_typeparams",
@@ -4783,8 +4747,8 @@ def prysm_deps():
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
sum = "h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=",
version = "v0.25.0",
sum = "h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=",
version = "v0.30.0",
)
go_repository(
name = "org_golang_x_perf",
@@ -4825,8 +4789,8 @@ def prysm_deps():
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
sum = "h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=",
version = "v0.9.0",
sum = "h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=",
version = "v0.12.0",
)
go_repository(
name = "org_golang_x_tools",
@@ -4866,14 +4830,14 @@ def prysm_deps():
go_repository(
name = "org_uber_go_dig",
importpath = "go.uber.org/dig",
sum = "h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=",
version = "v1.18.0",
sum = "h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=",
version = "v1.19.0",
)
go_repository(
name = "org_uber_go_fx",
importpath = "go.uber.org/fx",
sum = "h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=",
version = "v1.23.0",
sum = "h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=",
version = "v1.24.0",
)
go_repository(
name = "org_uber_go_goleak",

80
go.mod
View File

@@ -41,23 +41,24 @@ require (
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213
github.com/kisielk/errcheck v1.8.0
github.com/kr/pretty v0.3.1
github.com/libp2p/go-libp2p v0.39.1
github.com/libp2p/go-libp2p-mplex v0.9.0
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/libp2p/go-libp2p v0.44.0
github.com/libp2p/go-libp2p-mplex v0.11.0
github.com/libp2p/go-libp2p-pubsub v0.15.1-0.20260304214010-30edef80aae7
github.com/libp2p/go-mplex v0.7.0
github.com/logrusorgru/aurora v2.0.3+incompatible
github.com/manifoldco/promptui v0.7.0
github.com/marcopolo/simnet v0.0.1
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b
github.com/minio/highwayhash v1.0.2
github.com/minio/sha256-simd v1.0.1
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
github.com/multiformats/go-multiaddr v0.14.0
github.com/multiformats/go-multiaddr v0.16.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/paulbellamy/ratecounter v0.2.0
github.com/pborman/uuid v1.2.1
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2
github.com/prometheus/prom2json v1.3.0
github.com/prysmaticlabs/fastssz v0.0.0-20251103153600-259302269bfc
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
@@ -88,12 +89,12 @@ require (
go.uber.org/automaxprocs v1.5.2
go.uber.org/mock v0.5.2
golang.org/x/crypto v0.44.0
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
golang.org/x/sync v0.18.0
golang.org/x/tools v0.39.0
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
google.golang.org/grpc v1.71.0
google.golang.org/protobuf v1.36.5
google.golang.org/protobuf v1.36.6
gopkg.in/d4l3k/messagediff.v1 v1.2.1
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gopkg.in/yaml.v2 v2.4.0
@@ -122,8 +123,6 @@ require (
github.com/cockroachdb/pebble v1.1.5 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
@@ -134,9 +133,7 @@ require (
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect
github.com/ethereum/go-verkle v0.2.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -148,10 +145,7 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -169,25 +163,24 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/koron/go-ssdp v0.0.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.3 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.2 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/lunixbochs/vtclean v1.0.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.63 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
@@ -201,46 +194,41 @@ require (
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.0 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/onsi/gomega v1.36.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/ice/v2 v2.3.37 // indirect
github.com/pion/ice/v4 v4.0.6 // indirect
github.com/pion/interceptor v0.1.37 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns v0.0.12 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.11 // indirect
github.com/pion/sctp v1.8.35 // indirect
github.com/pion/sdp/v3 v3.0.10 // indirect
github.com/pion/srtp/v3 v3.0.4 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v2 v2.0.0 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/turn/v4 v4.0.0 // indirect
github.com/pion/webrtc/v4 v4.0.8 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -258,22 +246,22 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
lukechampine.com/blake3 v1.3.0 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect

181
go.sum
View File

@@ -91,7 +91,6 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
github.com/bazelbuild/rules_go v0.23.2 h1:Wxu7JjqnF78cKZbsBsARLSXx/jlGaSLCnUV3mTlyHvM=
github.com/bazelbuild/rules_go v0.23.2/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -128,7 +127,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -152,20 +150,13 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
@@ -203,9 +194,6 @@ github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUn
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo=
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
@@ -220,9 +208,6 @@ github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
@@ -317,21 +302,14 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
@@ -399,8 +377,6 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -421,7 +397,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
@@ -539,7 +514,6 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg=
github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
@@ -549,14 +523,14 @@ github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -582,28 +556,26 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.39.1 h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE=
github.com/libp2p/go-libp2p v0.39.1/go.mod h1:3zicI8Lp7Isun+Afo/JOACUbbJqqR2owK6RQWFsVAbI=
github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs=
github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8=
github.com/libp2p/go-libp2p-mplex v0.9.0/go.mod h1:ro1i4kuwiFT+uMPbIDIFkcLs1KRbNp0QwnUXM+P64Og=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
github.com/libp2p/go-libp2p-mplex v0.11.0 h1:0vwpLXRSfkTzshEjETIEgJaVxXvg+orbxYoIb3Ty5qM=
github.com/libp2p/go-libp2p-mplex v0.11.0/go.mod h1:QrsdNY3lzjpdo9V1goJfPb0O65Nms0sUR8CDAO18f6k=
github.com/libp2p/go-libp2p-pubsub v0.15.1-0.20260304214010-30edef80aae7 h1:Fy44dIDY1aBEJVIvXdT5A1VUU0tKEo95sSicWFlZIfU=
github.com/libp2p/go-libp2p-pubsub v0.15.1-0.20260304214010-30edef80aae7/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY=
github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=
github.com/libp2p/go-yamux/v4 v4.0.2/go.mod h1:C808cCRgOs1iBwY4S71T5oxgMxgLmqUw56qh4AeBW2o=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
@@ -620,6 +592,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/manifoldco/promptui v0.7.0 h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4=
github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -652,8 +626,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1f
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -700,21 +674,21 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -730,9 +704,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@@ -745,8 +718,6 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -756,9 +727,6 @@ github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlR
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -800,33 +768,29 @@ github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oL
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0=
github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
github.com/pion/ice/v4 v4.0.6 h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM=
github.com/pion/ice/v4 v4.0.6/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0=
@@ -840,13 +804,10 @@ github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.8 h1:T1ZmnT9qxIJIt4d8XoiMOBrTClGHDDXNg9e/fh018Qc=
github.com/pion/webrtc/v4 v4.0.8/go.mod h1:HHBeUVBAC+j4ZFnYhovEFStF02Arb1EyD4G7e7HBTJw=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -869,16 +830,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -886,8 +847,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -896,8 +857,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prysmaticlabs/fastssz v0.0.0-20251103153600-259302269bfc h1:ASmh3y4ALne2OoabF5pPL8OcIpBko8gFMg5018MxkBI=
@@ -911,14 +872,12 @@ github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294 h
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294/go.mod h1:ZVEbRdnMkGhp/pu35zq4SXxtvUwWK0J1MATtekZpH2Y=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b h1:x09Agz4ATTMEP3qb5P0MRxNZfd6O9wAyK3qwwqQZVQc=
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/r3labs/sse/v2 v2.10.0 h1:hFEkLLFY4LDifoHdiCN/LlGBAdVJYsANaLqNYa1l/v0=
github.com/r3labs/sse/v2 v2.10.0/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -971,7 +930,6 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -1037,7 +995,6 @@ github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b h1:EBo
github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b/go.mod h1:4R6Qam+w871wOlyRq59zRLjhb5x9/De/wgPeaCTaCwI=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
@@ -1118,10 +1075,10 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -1177,8 +1134,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -1279,8 +1236,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1298,7 +1255,6 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1335,7 +1291,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1382,7 +1337,6 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
@@ -1422,13 +1376,12 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1613,8 +1566,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y=
@@ -1680,8 +1633,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE=
lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@@ -0,0 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"adapter.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/internal/logrusadapter",
visibility = ["//:__subpackages__"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = [
"adapter_test.go",
"adapter_withattrs_test.go",
],
deps = [
":go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,84 @@
package logrusadapter

import (
	"context"
	"log/slog"

	"github.com/sirupsen/logrus"
)

// Handler wraps a logrus.Logger to satisfy slog.Handler.
type Handler struct {
	Logger *logrus.Logger
	entry  *logrus.Entry // carries accumulated fields from WithAttrs, nil if none
}

// Enabled implements slog.Handler.
func (h Handler) Enabled(_ context.Context, level slog.Level) bool {
	switch level {
	case slog.LevelDebug:
		return h.Logger.Level >= logrus.DebugLevel
	case slog.LevelInfo:
		return h.Logger.Level >= logrus.InfoLevel
	case slog.LevelWarn:
		return h.Logger.Level >= logrus.WarnLevel
	case slog.LevelError:
		return h.Logger.Level >= logrus.ErrorLevel
	default:
		return true
	}
}

// logEntry returns the base entry for logging, incorporating any fields
// accumulated via WithAttrs.
func (h Handler) logEntry() *logrus.Entry {
	if h.entry != nil {
		return h.entry
	}
	return logrus.NewEntry(h.Logger)
}

// Handle converts slog.Record into a logrus.Entry.
func (h Handler) Handle(_ context.Context, r slog.Record) error {
	entry := h.logEntry().WithTime(r.Time)
	r.Attrs(func(a slog.Attr) bool {
		if a.Value.Kind() == slog.KindLogValuer {
			entry = entry.WithField(a.Key, a.Value.LogValuer().LogValue().Any())
		} else {
			entry = entry.WithField(a.Key, a.Value.Any())
		}
		return true
	})
	switch r.Level {
	case slog.LevelDebug:
		entry.Debug(r.Message)
	case slog.LevelInfo:
		entry.Info(r.Message)
	case slog.LevelWarn:
		entry.Warn(r.Message)
	case slog.LevelError:
		entry.Error(r.Message)
	default:
		entry.Print(r.Message)
	}
	return nil
}

// WithAttrs implements slog.Handler.
func (h Handler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return Handler{Logger: h.Logger, entry: h.logEntry().WithFields(toFields(attrs))}
}

// WithGroup implements slog.Handler (no-op for simplicity).
func (h Handler) WithGroup(_ string) slog.Handler { return h }

func toFields(attrs []slog.Attr) logrus.Fields {
	fields := logrus.Fields{}
	for _, a := range attrs {
		fields[a.Key] = a.Value.Any()
	}
	return fields
}
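A minimal usage sketch of the adapter (assuming the caller lives inside the Prysm module, since the package is internal; it mirrors how the tests below construct the handler):

package main

import (
	"log/slog"

	"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
	"github.com/sirupsen/logrus"
)

func main() {
	// Reuse whatever logrus configuration the node already has.
	base := logrus.New()
	base.SetLevel(logrus.DebugLevel)

	// Wrap it so code written against log/slog is routed through logrus.
	logger := slog.New(logrusadapter.Handler{Logger: base})
	logger.With("component", "example").Info("hello from slog via logrus")
}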

View File

@@ -0,0 +1,170 @@
package logrusadapter_test
import (
"bytes"
"context"
"log/slog"
"strings"
"testing"
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
"github.com/sirupsen/logrus"
)
func TestLogrusAdapter(t *testing.T) {
var outBuf bytes.Buffer
l := logrus.Logger{
Out: &outBuf,
Formatter: &logrus.TextFormatter{},
Level: logrus.DebugLevel,
}
slogger := slog.New(logrusadapter.Handler{Logger: &l})
slogger.Error("test")
if !strings.Contains(outBuf.String(), "test") {
t.Errorf("unexpected output: %s", outBuf.String())
}
}
func TestLevelMapping(t *testing.T) {
tests := []struct {
name string
slogLevel slog.Level
logrusLevel logrus.Level
message string
wantInLog string
}{
{
name: "Debug level",
slogLevel: slog.LevelDebug,
logrusLevel: logrus.DebugLevel,
message: "debug message",
wantInLog: "level=debug",
},
{
name: "Info level",
slogLevel: slog.LevelInfo,
logrusLevel: logrus.InfoLevel,
message: "info message",
wantInLog: "level=info",
},
{
name: "Warn level",
slogLevel: slog.LevelWarn,
logrusLevel: logrus.WarnLevel,
message: "warn message",
wantInLog: "level=warning",
},
{
name: "Error level",
slogLevel: slog.LevelError,
logrusLevel: logrus.ErrorLevel,
message: "error message",
wantInLog: "level=error",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var outBuf bytes.Buffer
l := logrus.Logger{
Out: &outBuf,
Formatter: &logrus.TextFormatter{},
Level: tt.logrusLevel,
}
slogger := slog.New(logrusadapter.Handler{Logger: &l})
// Log at the specified level
switch tt.slogLevel {
case slog.LevelDebug:
slogger.Debug(tt.message)
case slog.LevelInfo:
slogger.Info(tt.message)
case slog.LevelWarn:
slogger.Warn(tt.message)
case slog.LevelError:
slogger.Error(tt.message)
}
output := outBuf.String()
if !strings.Contains(output, tt.message) {
t.Errorf("expected message %q not found in output: %s", tt.message, output)
}
if !strings.Contains(output, tt.wantInLog) {
t.Errorf("expected level indicator %q not found in output: %s", tt.wantInLog, output)
}
})
}
}
func TestEnabledLevels(t *testing.T) {
tests := []struct {
shouldBeEnabled bool
logrusLevel logrus.Level
slogLevel slog.Level
name string
}{
// When logrus is at DebugLevel, all levels should be enabled
{name: "Debug logger, debug level", logrusLevel: logrus.DebugLevel, slogLevel: slog.LevelDebug, shouldBeEnabled: true},
{name: "Debug logger, info level", logrusLevel: logrus.DebugLevel, slogLevel: slog.LevelInfo, shouldBeEnabled: true},
{name: "Debug logger, warn level", logrusLevel: logrus.DebugLevel, slogLevel: slog.LevelWarn, shouldBeEnabled: true},
{name: "Debug logger, error level", logrusLevel: logrus.DebugLevel, slogLevel: slog.LevelError, shouldBeEnabled: true},
// When logrus is at InfoLevel, debug should be disabled
{name: "Info logger, debug level", logrusLevel: logrus.InfoLevel, slogLevel: slog.LevelDebug, shouldBeEnabled: false},
{name: "Info logger, info level", logrusLevel: logrus.InfoLevel, slogLevel: slog.LevelInfo, shouldBeEnabled: true},
{name: "Info logger, warn level", logrusLevel: logrus.InfoLevel, slogLevel: slog.LevelWarn, shouldBeEnabled: true},
{name: "Info logger, error level", logrusLevel: logrus.InfoLevel, slogLevel: slog.LevelError, shouldBeEnabled: true},
// When logrus is at WarnLevel, debug and info should be disabled
{name: "Warn logger, debug level", logrusLevel: logrus.WarnLevel, slogLevel: slog.LevelDebug, shouldBeEnabled: false},
{name: "Warn logger, info level", logrusLevel: logrus.WarnLevel, slogLevel: slog.LevelInfo, shouldBeEnabled: false},
{name: "Warn logger, warn level", logrusLevel: logrus.WarnLevel, slogLevel: slog.LevelWarn, shouldBeEnabled: true},
{name: "Warn logger, error level", logrusLevel: logrus.WarnLevel, slogLevel: slog.LevelError, shouldBeEnabled: true},
// When logrus is at ErrorLevel, only error should be enabled
{name: "Error logger, debug level", logrusLevel: logrus.ErrorLevel, slogLevel: slog.LevelDebug, shouldBeEnabled: false},
{name: "Error logger, info level", logrusLevel: logrus.ErrorLevel, slogLevel: slog.LevelInfo, shouldBeEnabled: false},
{name: "Error logger, warn level", logrusLevel: logrus.ErrorLevel, slogLevel: slog.LevelWarn, shouldBeEnabled: false},
{name: "Error logger, error level", logrusLevel: logrus.ErrorLevel, slogLevel: slog.LevelError, shouldBeEnabled: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var outBuf bytes.Buffer
l := logrus.Logger{
Out: &outBuf,
Formatter: &logrus.TextFormatter{},
Level: tt.logrusLevel,
}
handler := logrusadapter.Handler{Logger: &l}
enabled := handler.Enabled(context.Background(), tt.slogLevel)
if enabled != tt.shouldBeEnabled {
t.Errorf("Enabled() = %v, want %v for logrus level %v and slog level %v",
enabled, tt.shouldBeEnabled, tt.logrusLevel, tt.slogLevel)
}
// Verify that disabled logs don't actually produce output
slogger := slog.New(handler)
switch tt.slogLevel {
case slog.LevelDebug:
slogger.Debug("test message")
case slog.LevelInfo:
slogger.Info("test message")
case slog.LevelWarn:
slogger.Warn("test message")
case slog.LevelError:
slogger.Error("test message")
}
hasOutput := strings.Contains(outBuf.String(), "test message")
if hasOutput != tt.shouldBeEnabled {
t.Errorf("Log output presence = %v, want %v", hasOutput, tt.shouldBeEnabled)
}
})
}
}

View File

@@ -0,0 +1,54 @@
package logrusadapter_test
import (
"bytes"
"log/slog"
"strings"
"testing"
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
"github.com/sirupsen/logrus"
)
// TestWithAttrsPreservesFields verifies that fields added via slog.Logger.With
// appear in subsequent log output. This catches the bug where WithAttrs
// returns Handler{Logger: entry.Logger}, discarding the entry's fields.
func TestWithAttrsPreservesFields(t *testing.T) {
var buf bytes.Buffer
l := &logrus.Logger{
Out: &buf,
Formatter: &logrus.TextFormatter{DisableTimestamp: true},
Level: logrus.DebugLevel,
}
base := slog.New(logrusadapter.Handler{Logger: l})
child := base.With("component", "test-component")
child.Info("hello")
output := buf.String()
if !strings.Contains(output, "component") || !strings.Contains(output, "test-component") {
t.Errorf("WithAttrs field lost in output.\ngot: %s\nwant output to contain: component=test-component", output)
}
}
// TestWithAttrsChained verifies that chaining multiple With calls accumulates fields.
func TestWithAttrsChained(t *testing.T) {
var buf bytes.Buffer
l := &logrus.Logger{
Out: &buf,
Formatter: &logrus.TextFormatter{DisableTimestamp: true},
Level: logrus.DebugLevel,
}
logger := slog.New(logrusadapter.Handler{Logger: l})
logger = logger.With("a", "1")
logger = logger.With("b", "2")
logger.Info("chained")
output := buf.String()
for _, want := range []string{"a", "1", "b", "2"} {
if !strings.Contains(output, want) {
t.Errorf("chained WithAttrs missing %q in output: %s", want, output)
}
}
}
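For contrast, the regression these tests guard against is the variant named in the test comment above; roughly (a sketch of the broken code, not the shipped adapter):

func (h Handler) WithAttrs(attrs []slog.Attr) slog.Handler {
	// Buggy: the entry carrying the new fields is built and then discarded,
	// so loggers derived via With(...) silently lose their fields.
	entry := h.logEntry().WithFields(toFields(attrs))
	return Handler{Logger: entry.Logger}
}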

View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.

package logrusadapter

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "internal/logrusadapter")

View File

@@ -189,6 +189,8 @@ ssz_fulu_objs = [
"DataColumnIdentifier",
"DataColumnsByRootIdentifier",
"DataColumnSidecar",
"PartialDataColumnPartsMetadata",
"PartialDataColumnSidecar",
"StatusV2",
"SignedBeaconBlockContentsFulu",
"SignedBeaconBlockFulu",
@@ -434,6 +436,7 @@ ssz_proto_files(
"blobs.proto",
"data_columns.proto",
"gloas.proto",
"partial_data_columns.proto",
"light_client.proto",
"sync_committee.proto",
"withdrawals.proto",

View File

@@ -1,4 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: a04fb7ec74508f383f3502e1bf0e7c1c25c3825016e8dbb5a8a98e71615026a6
package eth
import (
@@ -2494,3 +2495,581 @@ func (s *StatusV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the PartialDataColumnSidecar object
func (p *PartialDataColumnSidecar) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the PartialDataColumnSidecar object to a target array
func (p *PartialDataColumnSidecar) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(16)
// Offset (0) 'CellsPresentBitmap'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.CellsPresentBitmap)
// Offset (1) 'PartialColumn'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.PartialColumn) * 2048
// Offset (2) 'KzgProofs'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.KzgProofs) * 48
// Offset (3) 'Header'
dst = ssz.WriteOffset(dst, offset)
for ii := 0; ii < len(p.Header); ii++ {
offset += 4
offset += p.Header[ii].SizeSSZ()
}
// Field (0) 'CellsPresentBitmap'
if size := len(p.CellsPresentBitmap); size > 512 {
err = ssz.ErrBytesLengthFn("--.CellsPresentBitmap", size, 512)
return
}
dst = append(dst, p.CellsPresentBitmap...)
// Field (1) 'PartialColumn'
if size := len(p.PartialColumn); size > 4096 {
err = ssz.ErrListTooBigFn("--.PartialColumn", size, 4096)
return
}
for ii := 0; ii < len(p.PartialColumn); ii++ {
if size := len(p.PartialColumn[ii]); size != 2048 {
err = ssz.ErrBytesLengthFn("--.PartialColumn[ii]", size, 2048)
return
}
dst = append(dst, p.PartialColumn[ii]...)
}
// Field (2) 'KzgProofs'
if size := len(p.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return
}
for ii := 0; ii < len(p.KzgProofs); ii++ {
if size := len(p.KzgProofs[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgProofs[ii]", size, 48)
return
}
dst = append(dst, p.KzgProofs[ii]...)
}
// Field (3) 'Header'
if size := len(p.Header); size > 1 {
err = ssz.ErrListTooBigFn("--.Header", size, 1)
return
}
{
offset = 4 * len(p.Header)
for ii := 0; ii < len(p.Header); ii++ {
dst = ssz.WriteOffset(dst, offset)
offset += p.Header[ii].SizeSSZ()
}
}
for ii := 0; ii < len(p.Header); ii++ {
if dst, err = p.Header[ii].MarshalSSZTo(dst); err != nil {
return
}
}
return
}
// UnmarshalSSZ ssz unmarshals the PartialDataColumnSidecar object
func (p *PartialDataColumnSidecar) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 16 {
return ssz.ErrSize
}
tail := buf
var o0, o1, o2, o3 uint64
// Offset (0) 'CellsPresentBitmap'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 16 {
return ssz.ErrInvalidVariableOffset
}
// Offset (1) 'PartialColumn'
if o1 = ssz.ReadOffset(buf[4:8]); o1 > size || o0 > o1 {
return ssz.ErrOffset
}
// Offset (2) 'KzgProofs'
if o2 = ssz.ReadOffset(buf[8:12]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}
// Offset (3) 'Header'
if o3 = ssz.ReadOffset(buf[12:16]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
// Field (0) 'CellsPresentBitmap'
{
buf = tail[o0:o1]
if err = ssz.ValidateBitlist(buf, 512); err != nil {
return err
}
if cap(p.CellsPresentBitmap) == 0 {
p.CellsPresentBitmap = make([]byte, 0, len(buf))
}
p.CellsPresentBitmap = append(p.CellsPresentBitmap, buf...)
}
// Field (1) 'PartialColumn'
{
buf = tail[o1:o2]
num, err := ssz.DivideInt2(len(buf), 2048, 4096)
if err != nil {
return err
}
p.PartialColumn = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(p.PartialColumn[ii]) == 0 {
p.PartialColumn[ii] = make([]byte, 0, len(buf[ii*2048:(ii+1)*2048]))
}
p.PartialColumn[ii] = append(p.PartialColumn[ii], buf[ii*2048:(ii+1)*2048]...)
}
}
// Field (2) 'KzgProofs'
{
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
p.KzgProofs = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(p.KzgProofs[ii]) == 0 {
p.KzgProofs[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
p.KzgProofs[ii] = append(p.KzgProofs[ii], buf[ii*48:(ii+1)*48]...)
}
}
// Field (3) 'Header'
{
buf = tail[o3:]
num, err := ssz.DecodeDynamicLength(buf, 1)
if err != nil {
return err
}
p.Header = make([]*PartialDataColumnHeader, num)
err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) {
if p.Header[indx] == nil {
p.Header[indx] = new(PartialDataColumnHeader)
}
if err = p.Header[indx].UnmarshalSSZ(buf); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the PartialDataColumnSidecar object
func (p *PartialDataColumnSidecar) SizeSSZ() (size int) {
size = 16
// Field (0) 'CellsPresentBitmap'
size += len(p.CellsPresentBitmap)
// Field (1) 'PartialColumn'
size += len(p.PartialColumn) * 2048
// Field (2) 'KzgProofs'
size += len(p.KzgProofs) * 48
// Field (3) 'Header'
for ii := 0; ii < len(p.Header); ii++ {
size += 4
size += p.Header[ii].SizeSSZ()
}
return
}
// HashTreeRoot ssz hashes the PartialDataColumnSidecar object
func (p *PartialDataColumnSidecar) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the PartialDataColumnSidecar object with a hasher
func (p *PartialDataColumnSidecar) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'CellsPresentBitmap'
if len(p.CellsPresentBitmap) == 0 {
err = ssz.ErrEmptyBitlist
return
}
hh.PutBitlist(p.CellsPresentBitmap, 512)
// Field (1) 'PartialColumn'
{
if size := len(p.PartialColumn); size > 4096 {
err = ssz.ErrListTooBigFn("--.PartialColumn", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range p.PartialColumn {
if len(i) != 2048 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(p.PartialColumn))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'KzgProofs'
{
if size := len(p.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range p.KzgProofs {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(p.KzgProofs))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'Header'
{
subIndx := hh.Index()
num := uint64(len(p.Header))
if num > 1 {
err = ssz.ErrIncorrectListSize
return
}
for _, elem := range p.Header {
if err = elem.HashTreeRootWith(hh); err != nil {
return
}
}
hh.MerkleizeWithMixin(subIndx, num, 1)
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the PartialDataColumnHeader object
func (p *PartialDataColumnHeader) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the PartialDataColumnHeader object to a target array
func (p *PartialDataColumnHeader) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(340)
// Offset (0) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.KzgCommitments) * 48
// Field (1) 'SignedBlockHeader'
if p.SignedBlockHeader == nil {
p.SignedBlockHeader = new(SignedBeaconBlockHeader)
}
if dst, err = p.SignedBlockHeader.MarshalSSZTo(dst); err != nil {
return
}
// Field (2) 'KzgCommitmentsInclusionProof'
if size := len(p.KzgCommitmentsInclusionProof); size != 4 {
err = ssz.ErrVectorLengthFn("--.KzgCommitmentsInclusionProof", size, 4)
return
}
for ii := 0; ii < 4; ii++ {
if size := len(p.KzgCommitmentsInclusionProof[ii]); size != 32 {
err = ssz.ErrBytesLengthFn("--.KzgCommitmentsInclusionProof[ii]", size, 32)
return
}
dst = append(dst, p.KzgCommitmentsInclusionProof[ii]...)
}
// Field (0) 'KzgCommitments'
if size := len(p.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(p.KzgCommitments); ii++ {
if size := len(p.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, p.KzgCommitments[ii]...)
}
return
}
// UnmarshalSSZ ssz unmarshals the PartialDataColumnHeader object
func (p *PartialDataColumnHeader) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 340 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'KzgCommitments'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 340 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'SignedBlockHeader'
if p.SignedBlockHeader == nil {
p.SignedBlockHeader = new(SignedBeaconBlockHeader)
}
if err = p.SignedBlockHeader.UnmarshalSSZ(buf[4:212]); err != nil {
return err
}
// Field (2) 'KzgCommitmentsInclusionProof'
p.KzgCommitmentsInclusionProof = make([][]byte, 4)
for ii := 0; ii < 4; ii++ {
if cap(p.KzgCommitmentsInclusionProof[ii]) == 0 {
p.KzgCommitmentsInclusionProof[ii] = make([]byte, 0, len(buf[212:340][ii*32:(ii+1)*32]))
}
p.KzgCommitmentsInclusionProof[ii] = append(p.KzgCommitmentsInclusionProof[ii], buf[212:340][ii*32:(ii+1)*32]...)
}
// Field (0) 'KzgCommitments'
{
buf = tail[o0:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
p.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(p.KzgCommitments[ii]) == 0 {
p.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
p.KzgCommitments[ii] = append(p.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the PartialDataColumnHeader object
func (p *PartialDataColumnHeader) SizeSSZ() (size int) {
size = 340
// Field (0) 'KzgCommitments'
size += len(p.KzgCommitments) * 48
return
}
// HashTreeRoot ssz hashes the PartialDataColumnHeader object
func (p *PartialDataColumnHeader) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the PartialDataColumnHeader object with a hasher
func (p *PartialDataColumnHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'KzgCommitments'
{
if size := len(p.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range p.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(p.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (1) 'SignedBlockHeader'
if err = p.SignedBlockHeader.HashTreeRootWith(hh); err != nil {
return
}
// Field (2) 'KzgCommitmentsInclusionProof'
{
if size := len(p.KzgCommitmentsInclusionProof); size != 4 {
err = ssz.ErrVectorLengthFn("--.KzgCommitmentsInclusionProof", size, 4)
return
}
subIndx := hh.Index()
for _, i := range p.KzgCommitmentsInclusionProof {
if len(i) != 32 {
err = ssz.ErrBytesLength
return
}
hh.Append(i)
}
hh.Merkleize(subIndx)
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the PartialDataColumnPartsMetadata object
func (p *PartialDataColumnPartsMetadata) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the PartialDataColumnPartsMetadata object to a target array
func (p *PartialDataColumnPartsMetadata) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(8)
// Offset (0) 'Available'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.Available)
// Offset (1) 'Requests'
dst = ssz.WriteOffset(dst, offset)
offset += len(p.Requests)
// Field (0) 'Available'
if size := len(p.Available); size > 512 {
err = ssz.ErrBytesLengthFn("--.Available", size, 512)
return
}
dst = append(dst, p.Available...)
// Field (1) 'Requests'
if size := len(p.Requests); size > 512 {
err = ssz.ErrBytesLengthFn("--.Requests", size, 512)
return
}
dst = append(dst, p.Requests...)
return
}
// UnmarshalSSZ ssz unmarshals the PartialDataColumnPartsMetadata object
func (p *PartialDataColumnPartsMetadata) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 8 {
return ssz.ErrSize
}
tail := buf
var o0, o1 uint64
// Offset (0) 'Available'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 8 {
return ssz.ErrInvalidVariableOffset
}
// Offset (1) 'Requests'
if o1 = ssz.ReadOffset(buf[4:8]); o1 > size || o0 > o1 {
return ssz.ErrOffset
}
// Field (0) 'Available'
{
buf = tail[o0:o1]
if err = ssz.ValidateBitlist(buf, 512); err != nil {
return err
}
if cap(p.Available) == 0 {
p.Available = make([]byte, 0, len(buf))
}
p.Available = append(p.Available, buf...)
}
// Field (1) 'Requests'
{
buf = tail[o1:]
if err = ssz.ValidateBitlist(buf, 512); err != nil {
return err
}
if cap(p.Requests) == 0 {
p.Requests = make([]byte, 0, len(buf))
}
p.Requests = append(p.Requests, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the PartialDataColumnPartsMetadata object
func (p *PartialDataColumnPartsMetadata) SizeSSZ() (size int) {
size = 8
// Field (0) 'Available'
size += len(p.Available)
// Field (1) 'Requests'
size += len(p.Requests)
return
}
// HashTreeRoot ssz hashes the PartialDataColumnPartsMetadata object
func (p *PartialDataColumnPartsMetadata) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the PartialDataColumnPartsMetadata object with a hasher
func (p *PartialDataColumnPartsMetadata) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Available'
if len(p.Available) == 0 {
err = ssz.ErrEmptyBitlist
return
}
hh.PutBitlist(p.Available, 512)
// Field (1) 'Requests'
if len(p.Requests) == 0 {
err = ssz.ErrEmptyBitlist
return
}
hh.PutBitlist(p.Requests, 512)
hh.Merkleize(indx)
return
}
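For context on how the generated methods above are meant to be used, here is a minimal round-trip sketch for the simplest of the three containers, PartialDataColumnPartsMetadata. It is illustrative only and not part of this changeset; the import paths are assumptions inferred from the go_package option and the go-bitfield cast-type annotation shown later in this diff.
package main

import (
	"bytes"
	"log"

	bitfield "github.com/OffchainLabs/go-bitfield"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	// Both fields are Bitlists with an ssz-max of 512; NewBitlist(128) stays well
	// under the cap and sets the trailing length bit that ssz.ValidateBitlist
	// checks during UnmarshalSSZ.
	md := &eth.PartialDataColumnPartsMetadata{
		Available: bitfield.NewBitlist(128),
		Requests:  bitfield.NewBitlist(128),
	}
	enc, err := md.MarshalSSZ()
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	got := &eth.PartialDataColumnPartsMetadata{}
	if err := got.UnmarshalSSZ(enc); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	if !bytes.Equal(got.Available, md.Available) || !bytes.Equal(got.Requests, md.Requests) {
		log.Fatal("round-trip mismatch")
	}
}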

proto/prysm/v1alpha1/partial_data_columns.pb.go (generated)

@@ -0,0 +1,327 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/prysm/v1alpha1/partial_data_columns.proto
package eth
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_go_bitfield "github.com/OffchainLabs/go-bitfield"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PartialDataColumnSidecar struct {
state protoimpl.MessageState `protogen:"open.v1"`
CellsPresentBitmap github_com_OffchainLabs_go_bitfield.Bitlist `protobuf:"bytes,1,opt,name=cells_present_bitmap,json=cellsPresentBitmap,proto3" json:"cells_present_bitmap,omitempty" cast-type:"github.com/OffchainLabs/go-bitfield.Bitlist" ssz-max:"512"`
PartialColumn [][]byte `protobuf:"bytes,2,rep,name=partial_column,json=partialColumn,proto3" json:"partial_column,omitempty" ssz-max:"4096" ssz-size:"?,2048"`
KzgProofs [][]byte `protobuf:"bytes,3,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"4096" ssz-size:"?,48"`
Header []*PartialDataColumnHeader `protobuf:"bytes,4,rep,name=header,proto3" json:"header,omitempty" ssz-max:"1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PartialDataColumnSidecar) Reset() {
*x = PartialDataColumnSidecar{}
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PartialDataColumnSidecar) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PartialDataColumnSidecar) ProtoMessage() {}
func (x *PartialDataColumnSidecar) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PartialDataColumnSidecar.ProtoReflect.Descriptor instead.
func (*PartialDataColumnSidecar) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP(), []int{0}
}
func (x *PartialDataColumnSidecar) GetCellsPresentBitmap() github_com_OffchainLabs_go_bitfield.Bitlist {
if x != nil {
return x.CellsPresentBitmap
}
return github_com_OffchainLabs_go_bitfield.Bitlist(nil)
}
func (x *PartialDataColumnSidecar) GetPartialColumn() [][]byte {
if x != nil {
return x.PartialColumn
}
return nil
}
func (x *PartialDataColumnSidecar) GetKzgProofs() [][]byte {
if x != nil {
return x.KzgProofs
}
return nil
}
func (x *PartialDataColumnSidecar) GetHeader() []*PartialDataColumnHeader {
if x != nil {
return x.Header
}
return nil
}
type PartialDataColumnHeader struct {
state protoimpl.MessageState `protogen:"open.v1"`
KzgCommitments [][]byte `protobuf:"bytes,1,rep,name=kzg_commitments,json=kzgCommitments,proto3" json:"kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
SignedBlockHeader *SignedBeaconBlockHeader `protobuf:"bytes,2,opt,name=signed_block_header,json=signedBlockHeader,proto3" json:"signed_block_header,omitempty"`
KzgCommitmentsInclusionProof [][]byte `protobuf:"bytes,3,rep,name=kzg_commitments_inclusion_proof,json=kzgCommitmentsInclusionProof,proto3" json:"kzg_commitments_inclusion_proof,omitempty" ssz-size:"4,32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PartialDataColumnHeader) Reset() {
*x = PartialDataColumnHeader{}
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PartialDataColumnHeader) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PartialDataColumnHeader) ProtoMessage() {}
func (x *PartialDataColumnHeader) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PartialDataColumnHeader.ProtoReflect.Descriptor instead.
func (*PartialDataColumnHeader) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP(), []int{1}
}
func (x *PartialDataColumnHeader) GetKzgCommitments() [][]byte {
if x != nil {
return x.KzgCommitments
}
return nil
}
func (x *PartialDataColumnHeader) GetSignedBlockHeader() *SignedBeaconBlockHeader {
if x != nil {
return x.SignedBlockHeader
}
return nil
}
func (x *PartialDataColumnHeader) GetKzgCommitmentsInclusionProof() [][]byte {
if x != nil {
return x.KzgCommitmentsInclusionProof
}
return nil
}
type PartialDataColumnPartsMetadata struct {
state protoimpl.MessageState `protogen:"open.v1"`
Available github_com_OffchainLabs_go_bitfield.Bitlist `protobuf:"bytes,1,opt,name=available,proto3" json:"available,omitempty" cast-type:"github.com/OffchainLabs/go-bitfield.Bitlist" ssz-max:"512"`
Requests github_com_OffchainLabs_go_bitfield.Bitlist `protobuf:"bytes,2,opt,name=requests,proto3" json:"requests,omitempty" cast-type:"github.com/OffchainLabs/go-bitfield.Bitlist" ssz-max:"512"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PartialDataColumnPartsMetadata) Reset() {
*x = PartialDataColumnPartsMetadata{}
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PartialDataColumnPartsMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PartialDataColumnPartsMetadata) ProtoMessage() {}
func (x *PartialDataColumnPartsMetadata) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PartialDataColumnPartsMetadata.ProtoReflect.Descriptor instead.
func (*PartialDataColumnPartsMetadata) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP(), []int{2}
}
func (x *PartialDataColumnPartsMetadata) GetAvailable() github_com_OffchainLabs_go_bitfield.Bitlist {
if x != nil {
return x.Available
}
return github_com_OffchainLabs_go_bitfield.Bitlist(nil)
}
func (x *PartialDataColumnPartsMetadata) GetRequests() github_com_OffchainLabs_go_bitfield.Bitlist {
if x != nil {
return x.Requests
}
return github_com_OffchainLabs_go_bitfield.Bitlist(nil)
}
var File_proto_prysm_v1alpha1_partial_data_columns_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc = []byte{
0x0a, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64,
0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x62, 0x65, 0x61, 0x63,
0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44,
0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72,
0x12, 0x68, 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e,
0x74, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x36,
0x82, 0xb5, 0x18, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62,
0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x6c, 0x69, 0x73, 0x74, 0x92,
0xb5, 0x18, 0x03, 0x35, 0x31, 0x32, 0x52, 0x12, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x50, 0x72, 0x65,
0x73, 0x65, 0x6e, 0x74, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x61,
0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x06, 0x3f, 0x2c, 0x32, 0x30, 0x34, 0x38, 0x92, 0xb5,
0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x43,
0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x2f, 0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f,
0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f,
0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x09, 0x6b, 0x7a, 0x67,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x4d, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50,
0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x05, 0x92, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x06, 0x68,
0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x85, 0x02, 0x0a, 0x17, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61,
0x6c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65,
0x72, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d,
0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04,
0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a,
0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x13,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61,
0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65,
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65,
0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x1f,
0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x5f,
0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x34, 0x2c, 0x33, 0x32, 0x52,
0x1c, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x49,
0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xca, 0x01,
0x0a, 0x1e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
0x75, 0x6d, 0x6e, 0x50, 0x61, 0x72, 0x74, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x12, 0x54, 0x0a, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x42, 0x36, 0x82, 0xb5, 0x18, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73,
0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74,
0x6c, 0x69, 0x73, 0x74, 0x92, 0xb5, 0x18, 0x03, 0x35, 0x31, 0x32, 0x52, 0x09, 0x61, 0x76, 0x61,
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x52, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x36, 0x82, 0xb5, 0x18, 0x2b, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69,
0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x2e, 0x42, 0x69, 0x74, 0x6c, 0x69, 0x73, 0x74, 0x92, 0xb5, 0x18, 0x03, 0x35, 0x31, 0x32,
0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69,
0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescOnce sync.Once
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData = file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc
)
func file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP() []byte {
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescOnce.Do(func() {
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData)
})
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData
}
var file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes = []any{
(*PartialDataColumnSidecar)(nil), // 0: ethereum.eth.v1alpha1.PartialDataColumnSidecar
(*PartialDataColumnHeader)(nil), // 1: ethereum.eth.v1alpha1.PartialDataColumnHeader
(*PartialDataColumnPartsMetadata)(nil), // 2: ethereum.eth.v1alpha1.PartialDataColumnPartsMetadata
(*SignedBeaconBlockHeader)(nil), // 3: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
}
var file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs = []int32{
1, // 0: ethereum.eth.v1alpha1.PartialDataColumnSidecar.header:type_name -> ethereum.eth.v1alpha1.PartialDataColumnHeader
3, // 1: ethereum.eth.v1alpha1.PartialDataColumnHeader.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_partial_data_columns_proto_init() }
func file_proto_prysm_v1alpha1_partial_data_columns_proto_init() {
if File_proto_prysm_v1alpha1_partial_data_columns_proto != nil {
return
}
file_proto_prysm_v1alpha1_beacon_core_types_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes,
DependencyIndexes: file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs,
MessageInfos: file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes,
}.Build()
File_proto_prysm_v1alpha1_partial_data_columns_proto = out.File
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc = nil
file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes = nil
file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs = nil
}


@@ -0,0 +1,57 @@
// Copyright 2025 Offchain Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package ethereum.eth.v1alpha1;
import "proto/eth/ext/options.proto";
import "proto/prysm/v1alpha1/beacon_core_types.proto";
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
message PartialDataColumnSidecar {
bytes cells_present_bitmap = 1 [
(ethereum.eth.ext.ssz_max) = "max_blob_commitments_bitmap.size",
(ethereum.eth.ext.cast_type) = "github.com/OffchainLabs/go-bitfield.Bitlist"
];
repeated bytes partial_column = 2 [
(ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_proofs = 3 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated PartialDataColumnHeader header = 4 [(ethereum.eth.ext.ssz_max) = "1"];
}
message PartialDataColumnHeader {
repeated bytes kzg_commitments = 1 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
SignedBeaconBlockHeader signed_block_header = 2;
repeated bytes kzg_commitments_inclusion_proof = 3 [(ethereum.eth.ext.ssz_size) = "kzg_commitments_inclusion_proof_depth.size,32"];
}
message PartialDataColumnPartsMetadata {
bytes available = 1 [
(ethereum.eth.ext.ssz_max) = "max_blob_commitments_bitmap.size",
(ethereum.eth.ext.cast_type) = "github.com/OffchainLabs/go-bitfield.Bitlist"
];
bytes requests = 2 [
(ethereum.eth.ext.ssz_max) = "max_blob_commitments_bitmap.size",
(ethereum.eth.ext.cast_type) = "github.com/OffchainLabs/go-bitfield.Bitlist"
];
}


@@ -0,0 +1,24 @@
package eth_test
import (
"os"
"strings"
"testing"
)
// TestPartialDataColumnsProtoGoPackageVersion verifies that the go_package
// option in partial_data_columns.proto uses v7, matching the rest of the
// codebase. A mismatch (e.g. v6) would cause the next codegen run to produce
// code with the wrong import path.
func TestPartialDataColumnsProtoGoPackageVersion(t *testing.T) {
content, err := os.ReadFile("partial_data_columns.proto")
if err != nil {
t.Fatalf("failed to read proto file: %v", err)
}
want := `go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth"`
if !strings.Contains(string(content), want) {
t.Errorf("partial_data_columns.proto has wrong go_package.\nwant line containing: %s\ngot file content:\n%s",
want, string(content))
}
}


@@ -25,6 +25,7 @@ mainnet = {
"extra_data.size": "32",
"max_blobs_per_block.size": "6",
"max_blob_commitments.size": "4096",
"max_blob_commitments_bitmap.size": "512", # MAX_BLOB_COMMITMENTS_PER_BLOCK / 8
"max_cell_proofs_length.size": "33554432", # FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
"kzg_commitment_inclusion_proof_depth.size": "17",
"max_withdrawal_requests_per_payload.size": "16",
@@ -70,6 +71,7 @@ minimal = {
"extra_data.size": "32",
"max_blobs_per_block.size": "6",
"max_blob_commitments.size": "4096",
"max_blob_commitments_bitmap.size": "512", # MAX_BLOB_COMMITMENTS_PER_BLOCK / 8
"max_cell_proofs_length.size": "33554432", # FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
"kzg_commitment_inclusion_proof_depth.size": "17",
"max_withdrawal_requests_per_payload.size": "16",


@@ -1,6 +1,7 @@
package util
import (
"slices"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -65,7 +66,7 @@ func CreateTestVerifiedRoDataColumnSidecars(t *testing.T, params []DataColumnPar
Index: param.Index,
Column: column,
KzgCommitments: param.KzgCommitments,
KzgProofs: param.KzgProofs,
KzgProofs: slices.Clone(param.KzgProofs),
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: param.Slot,