Compare commits

...

263 Commits

Author SHA1 Message Date
kasey
099b2faf8f mv eip4844->deneb 2023-01-23 11:51:07 -06:00
kasey
b9022a070c fixing lint issues 2023-01-23 11:37:43 -06:00
kasey
9004af48f9 make blob response typedef public 2023-01-23 07:03:08 -06:00
terence tsao
dace0f6a2d Rename eip4844 to deneb 2023-01-19 17:30:36 -08:00
terence tsao
26a5878181 Write bad block and blob to disk 2023-01-18 14:02:09 -08:00
terence tsao
db6474a3e4 Sync with develop 2023-01-18 12:00:48 -08:00
terence tsao
b84851fd0d Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-01-18 08:38:13 -08:00
terence tsao
673702c100 Fix loop referencing for kzgs 2023-01-12 15:50:46 -08:00
terence tsao
520eb6baca Rm unused checks 2023-01-12 10:36:21 -08:00
terence tsao
e6047dc344 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2023-01-11 14:59:00 -08:00
terence tsao
d86a452b15 Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-11 14:52:26 -08:00
terence tsao
67f9d0b9c4 Fix log 2023-01-11 08:08:33 -08:00
terence tsao
21cd055b84 Better logs 2023-01-11 07:40:19 -08:00
Kasey Kirkham
9f3bb623ec add capella to yaml "template" 2023-01-10 16:30:32 -06:00
Kasey Kirkham
b10a95097e capella state version detection bug fix 2023-01-10 15:54:27 -06:00
terence tsao
4561f5cacb Clean ups 2023-01-09 14:33:05 -08:00
terence tsao
50b672a4db Beacon api: get blobs 2023-01-09 11:40:06 -08:00
Potuz
ffbb73a59b Merge remote-tracking branch 'origin/develop' into capella 2023-01-09 12:07:00 -03:00
Potuz
649974f14d removed duplicated case 2023-01-09 09:54:15 -03:00
Potuz
9ec0bc0734 Merge remote-tracking branch 'origin/historical-summaries' into capella 2023-01-09 08:57:24 -03:00
terence tsao
9649e49658 Passing spec tests 2023-01-07 08:50:20 -08:00
terence tsao
49fdcb7347 Add spec tests 2023-01-07 08:33:48 -08:00
terence tsao
cd6ee956ed Merge branch 'historical-summaries' into eip4844 2023-01-07 08:03:15 -08:00
terence tsao
ef95fd33f8 Uncomment withdrawal stubs 2023-01-07 07:52:31 -08:00
terence tsao
1a488241b0 Sync with capella 2023-01-07 07:48:57 -08:00
terence tsao
5fdd3a3d66 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2023-01-07 07:19:55 -08:00
terence tsao
b6a32c050f Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-07 07:03:58 -08:00
terence tsao
055e225093 Passing spec tests 2023-01-06 15:18:38 -08:00
terence tsao
144218cb1b Process historical roots test 2023-01-06 11:56:24 -08:00
Potuz
13b575a609 Merge remote-tracking branch 'origin/develop' into capella 2023-01-06 10:59:43 -03:00
terence tsao
b5a414eae9 Rm bad imports 2023-01-04 08:10:27 -08:00
terence tsao
b94b347ace Sync with develop 2023-01-04 07:37:25 -08:00
terence tsao
f5ee225819 Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-04 07:16:00 -08:00
terence tsao
9cb48be14f Add historical summaries to state and processing 2023-01-03 15:58:39 -08:00
terence tsao
85fa9951eb Fix span name 2022-12-24 23:40:30 +08:00
terence tsao
ec72575fc9 Close stream for by root rpc handler 2022-12-24 23:34:02 +08:00
terence tsao
d9d1bb6d3d Skip on empty 2022-12-24 11:37:58 +08:00
Potuz
ffcdc26618 Merge remote-tracking branch 'origin/develop' into capella 2022-12-23 13:03:11 -03:00
Potuz
96981a07b9 check signatures of BLS changes before capella 2022-12-22 16:10:00 -03:00
Potuz
6b2721b239 Check BLS_TO_EXECUTION_CHANGE as if they were from Capella
In the event we receive a BLS_TO_EXECUTION_CHANGE and our head state is
before Capella, verify it's signature with the Capella fork version.
2022-12-22 11:34:57 -03:00
terence tsao
c79151a574 Fix rate limiter 2022-12-22 22:10:34 +08:00
Inphi
4b20234801 Fix missing context in post-altair /v1 messages (#11807) 2022-12-22 06:46:15 +08:00
terence tsao
911048aa6d Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-21 13:01:40 +08:00
James He
255e9693ee fixing typo on comments 2022-12-20 22:42:31 -06:00
terence tsao
61c1216e3d Better handling for validate sync msg time 2022-12-21 10:08:44 +08:00
terence tsao
17e1eaf0f3 Set stream write deadline 2022-12-21 09:38:00 +08:00
terence tsao
9940943595 Clean up sync 2022-12-21 09:30:11 +08:00
terence tsao
9a0f941870 Change avgSidecarBlobsTransferBytes 2022-12-21 08:32:47 +08:00
terence tsao
5d0f54d332 Blobs rate limiter 2022-12-21 08:17:54 +08:00
rkapka
d602c94b7b fix 2022-12-20 20:28:53 +01:00
rkapka
6a5ecbd68f Implement getPoolBLSToExecutionChanges API endpoint
(cherry picked from commit cd25d922bc)

# Conflicts:
#	beacon-chain/node/node.go
#	beacon-chain/rpc/eth/beacon/pool.go
#	proto/eth/service/beacon_chain_service.pb.go
#	proto/migration/v1alpha1_to_v2.go
2022-12-20 17:31:21 +01:00
Potuz
29dfcab505 Fix BlockValue marshalling 2022-12-19 11:14:35 -03:00
Potuz
16e5c903cc Merge remote-tracking branch 'origin/develop' into capella 2022-12-19 10:28:05 -03:00
terencechain
66682cb4e5 Clean up block initizliation and remove set block (#11791) 2022-12-19 15:41:28 +08:00
terencechain
52faea8b7d Support 4844 container type queries in the beacon API + update spec tests (#11794) 2022-12-19 15:38:23 +08:00
terence tsao
8a78315682 Save blobs during by range sync 2022-12-18 07:44:30 +08:00
Potuz
cab42a4ae3 Take raw arrays for BLS changes 2022-12-16 18:02:35 -03:00
Potuz
a5bdd42bdd Merge remote-tracking branch 'origin/block-value' into capella 2022-12-16 12:47:29 -03:00
Potuz
a26197f919 take lists for bls changes endpoint 2022-12-16 12:47:18 -03:00
terence tsao
8b9cab457e Got block and blobs gossip working 2022-12-16 13:41:59 +08:00
terence tsao
080ce31395 Add block value to get payload v2 2022-12-15 17:17:58 +08:00
terence tsao
7866e8a196 Got blob syncing to work 2022-12-15 17:02:38 +08:00
Potuz
d5d17e00b3 Merge branch 'develop' into capella 2022-12-14 13:04:23 -03:00
rkapka
9c6a1331cf remove redeclared struct 2022-12-14 16:29:14 +01:00
terence tsao
d89c97634c Merge branch 'eip4844' of github.com:prysmaticlabs/prysm into eip4844 2022-12-14 10:26:47 +08:00
terence tsao
7e95ca3705 Add blobs to initial syncing path 2022-12-14 10:26:39 +08:00
Inphi
abd46b01b7 Fix non-empty kzg commitment in proposal (#11766) 2022-12-14 08:06:14 +08:00
Potuz
8629ac8417 only broadcast bls changes post-capella 2022-12-13 09:53:05 -03:00
terence tsao
304925aabf Add todos for 4844 sync 2022-12-13 16:18:09 +08:00
terence tsao
16d93e47a5 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-13 15:12:46 +08:00
rkapka
6dcb2bbf0d Use signed changes in middleware block
(cherry picked from commit e3c9e7bb5c)
2022-12-12 16:21:39 +01:00
Potuz
deb138959a fix validator client 2022-12-12 11:43:48 -03:00
Potuz
45e6f3bd00 fix build 2022-12-12 11:29:57 -03:00
Potuz
55a9e0d51a Merge branch 'develop' into capella 2022-12-12 11:15:39 -03:00
terence tsao
3ddae600fb Merge branch 'capella' of github.com:prysmaticlabs/prysm into capella 2022-12-09 19:35:39 -08:00
terence tsao
861ede8945 Fix subscriptions 2022-12-09 15:29:23 -08:00
terence tsao
93f11f9047 Change target / max blobs to 2 / 4 2022-12-09 10:40:20 -08:00
rkapka
56503110dd Merge branch 'recontruct-capella-blinded' into capella
# Conflicts:
#	testing/spectest/shared/common/forkchoice/service.go
2022-12-09 12:45:23 +01:00
rkapka
f67d35dffd single execution block type 2022-12-09 12:43:35 +01:00
terence tsao
efbca1b5b7 Add v3 engine apis 2022-12-08 16:45:05 -08:00
terence tsao
2de0ebaf8d Merge branch 'roberto-fix-auth' into eip4844 2022-12-08 13:52:11 -08:00
Roberto Bayardo
0815ef94a3 Merge branch 'develop' into roberto-fix-auth 2022-12-08 13:23:29 -08:00
Roberto Bayardo
092ffa99e5 update & fix code around setting auth header for latest geth 2022-12-08 13:14:09 -08:00
rkapka
b05b67b264 reorder checks 2022-12-08 19:48:02 +01:00
rkapka
a5c6518c20 deepsource 2022-12-08 19:48:02 +01:00
Radosław Kapka
da048395ce Merge branch 'develop' into recontruct-capella-blinded 2022-12-08 18:41:12 +01:00
rkapka
f31f7be310 fix engine mock 2022-12-08 18:39:59 +01:00
rkapka
e1a2267f86 Merge remote-tracking branch 'origin/capella' into capella 2022-12-08 18:36:44 +01:00
rkapka
3c9e4ee7f7 Merge branch 'recontruct-capella-blinded' into capella
# Conflicts:
#	beacon-chain/blockchain/pow_block.go
#	beacon-chain/execution/engine_client.go
#	beacon-chain/execution/engine_client_test.go
#	beacon-chain/execution/testing/mock_engine_client.go
#	beacon-chain/rpc/eth/beacon/blocks.go
#	beacon-chain/state/state-native/getters_withdrawal.go
#	consensus-types/blocks/factory.go
#	proto/engine/v1/json_marshal_unmarshal.go
#	proto/engine/v1/json_marshal_unmarshal_test.go
2022-12-08 18:31:25 +01:00
rkapka
9ba32c9acd single ExecutionBlockByHash function 2022-12-08 17:53:02 +01:00
rkapka
d23008452e fix failing tests 2022-12-08 17:29:51 +01:00
terencechain
f397cba1e0 Better proposal RPC (#11721) 2022-12-08 07:40:48 -08:00
terence tsao
3eecbb5b1a Fix enum for cli 2022-12-07 12:12:56 -08:00
rkapka
1583e93b48 bzl 2022-12-07 18:23:43 +01:00
rkapka
849457df81 deepsource
(cherry picked from commit 903cab75ee)

# Conflicts:
#	beacon-chain/execution/testing/mock_engine_client.go
2022-12-07 16:29:34 +01:00
rkapka
903cab75ee deepsource 2022-12-07 16:27:09 +01:00
rkapka
ee108d4aff add doc to interface
(cherry picked from commit a08baf4a14)
2022-12-07 16:22:14 +01:00
rkapka
49bcc58762 rename methods
(cherry picked from commit 8c56dfdd46)
2022-12-07 16:22:09 +01:00
rkapka
a08baf4a14 add doc to interface 2022-12-07 16:20:43 +01:00
rkapka
8c56dfdd46 rename methods 2022-12-07 16:20:31 +01:00
rkapka
dcdd9af9db remove unneeded test 2022-12-07 16:05:44 +01:00
rkapka
a464cf5c60 Merge branch 'reconstruct-capella-block' into capella
(cherry picked from commit b0601580ef)

# Conflicts:
#	beacon-chain/rpc/eth/beacon/blocks.go
#	proto/engine/v1/json_marshal_unmarshal.go
2022-12-07 15:21:58 +01:00
terence tsao
cc55c754dc Fix cli flag 2022-12-06 16:57:50 -08:00
terence tsao
2d483ab09f Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-06 16:48:44 -08:00
terence tsao
d64e10a337 Interop 2022-12-06 16:06:18 -08:00
terence tsao
1e9ee10674 Merge branch 'better-validator-rpcs' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 15:14:20 -08:00
terence tsao
3ac395b39e Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:42:49 -08:00
Justin Traglia
6e26a6f128 Replace LastWithdrawalValidatorIndex to updated name (#11725) 2022-12-06 14:40:10 -08:00
Justin Traglia
b512b92a8a Update withdrawal error message to reflect new field name (#11724) 2022-12-06 14:39:17 -08:00
terence tsao
5ff601a1b9 Sync with latest go-ethereum changes 2022-12-06 14:38:14 -08:00
terence tsao
5823054519 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:08:00 -08:00
terence tsao
3d196662bc Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:06:30 -08:00
rkapka
b0601580ef Merge branch 'reconstruct-capella-block' into capella 2022-12-06 22:16:59 +01:00
rkapka
c1f29ea651 remove logs 2022-12-06 22:11:35 +01:00
rkapka
881d1d435a logs 2022-12-06 21:46:41 +01:00
rkapka
d1aae0c941 Merge branch 'capella' into reconstruct-capella-block 2022-12-06 21:26:55 +01:00
terence tsao
468cc23876 Fix interop 2022-12-04 08:40:41 -08:00
terence tsao
d9646a9183 Add builder paths 2022-12-03 07:29:46 -08:00
terence tsao
279cee42f1 Refactor block proposal path 2022-12-02 16:13:01 -08:00
terence tsao
57bdb907cc Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-02 11:10:39 -08:00
rkapka
15d683c78f Merge branch 'capella' into reconstruct-capella-block 2022-12-02 16:43:56 +01:00
rkapka
bf6c8ced7d working 2022-12-02 16:37:24 +01:00
Potuz
78fb685027 Check BLS changes when requesting from the pool 2022-12-02 10:14:39 -03:00
terence tsao
a87536eba0 Fix minimal spec test 2022-12-01 15:26:18 -08:00
terence tsao
3f05395a00 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-01 14:52:49 -08:00
terence tsao
85fc57d41e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-12-01 14:45:31 -08:00
terence tsao
1e5976d5ce Mainnet spec tests passing 2022-12-01 14:41:34 -08:00
Potuz
98c0b23350 broadcast BLS changes 2022-12-01 11:26:56 -03:00
terence tsao
039a0fffba Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-11-30 12:09:47 -08:00
Potuz
90ec640e7a Fix capella operations spectests 2022-11-30 15:35:55 -03:00
Potuz
10acd31d25 check on verify instead of sig 2022-11-30 15:20:23 -03:00
terence tsao
4224014fad Add 4844 block and state 2022-11-30 10:14:35 -08:00
Potuz
df1e8b33d8 BLS Change signature verification 2022-11-30 14:52:31 -03:00
rkapka
cdb4ee42cc not working 2022-11-30 18:22:52 +01:00
rkapka
d29baec77e proper error handling in BuildSignedBeaconBlockFromExecutionPayload 2022-11-30 15:32:57 +01:00
terence tsao
53c189da9b Merge branch 'update-eip4844-objs' into eip4844 2022-11-29 21:16:31 -08:00
terence tsao
277fbce61b Update eip4844 objects 2022-11-29 21:15:14 -08:00
terence tsao
0adc54b7ff Refactor get payload 2022-11-29 12:02:47 -08:00
Potuz
1cbd7e9888 withdraw by default 2022-11-29 13:52:34 -03:00
rkapka
0a9e1658dd move stuff to blinded_blocks.go 2022-11-29 17:36:52 +01:00
rkapka
31d4a4cd11 test other functions 2022-11-29 17:36:52 +01:00
rkapka
fbc4e73d31 refactor and test GetSSZBlockV2 2022-11-29 17:36:52 +01:00
rkapka
c1d4eaa79d refactor and test GetBlockV2 2022-11-29 17:36:52 +01:00
rkapka
760af6428e update ssz 2022-11-29 17:36:52 +01:00
terence tsao
dfa0ccf626 Fix attribute pb nil checks 2022-11-29 07:27:33 -08:00
terence tsao
7a142cf324 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-29 07:09:41 -08:00
rkapka
1a51fdbd58 update withdrawals proto 2022-11-29 14:23:11 +01:00
terencechain
368a99ec8d Fix nil attribute for capella branch (#11701) 2022-11-28 17:34:26 -08:00
terence tsao
1c7e734918 Fix some blockchain tests 2022-11-28 13:59:17 -08:00
rkapka
764d1325bf Merge remote-tracking branch 'origin/capella' into capella 2022-11-28 21:07:00 +01:00
rkapka
0cf30e9022 Merge branch '__develop' into capella 2022-11-28 21:06:20 +01:00
Potuz
227b20f368 fix nil block from stream 2022-11-28 16:53:11 -03:00
rkapka
d7d70bc25b support SSZ lol
(cherry picked from commit 52bc2c8d617ac3e1254c493fa053cdce4a1ebd63)
2022-11-28 20:19:24 +01:00
rkapka
82f6ddb693 add Capella version
(cherry picked from commit 5d6fd0bbe663e5dd16df5b2e773f68982bbcd24e)
2022-11-28 20:19:19 +01:00
rkapka
9e4e82d2c5 refactor GetBlindedBlockSSZ
(cherry picked from commit 97483c339f99b0d96bd81846a979383ffd2b0cda)

# Conflicts:
#	beacon-chain/rpc/eth/beacon/blocks.go
2022-11-28 20:19:15 +01:00
rkapka
9838369fe9 fix proto generation 2022-11-28 20:15:05 +01:00
rkapka
6085ad1bfa fix issues 2022-11-28 20:07:37 +01:00
rkapka
d3851b27df Merge branch '__develop' into capella
# Conflicts:
#	beacon-chain/rpc/apimiddleware/structs.go
#	beacon-chain/rpc/eth/beacon/blocks.go
#	proto/eth/v2/BUILD.bazel
#	proto/eth/v2/beacon_block.pb.go
#	proto/eth/v2/beacon_block.proto
#	proto/eth/v2/generated.ssz.go
#	proto/migration/v1alpha1_to_v2.go
#	proto/prysm/v1alpha1/beacon_chain.pb.go
#	proto/prysm/v1alpha1/beacon_chain.proto
2022-11-28 19:31:33 +01:00
Potuz
d6100dfdcb fix spectest 2022-11-28 12:38:14 -03:00
Potuz
c2144dac86 Add BLSToExecutionChangge endpoint 2022-11-27 23:00:16 -03:00
Potuz
a47ff569a8 Add Submit BLSChange endpoint 2022-11-27 20:50:21 -03:00
Potuz
f8be022ef2 Merge branch 'develop' into capella 2022-11-27 20:40:41 -03:00
Potuz
4f39e6b685 Implement REST block API endpoints 2022-11-27 16:20:30 -03:00
Potuz
c67b000633 add test 2022-11-27 13:07:50 -03:00
Potuz
02f7443586 Refactor Sync Committee Rewards 2022-11-27 09:04:03 -03:00
terence tsao
6275e7df4e Clean up execution engine 2022-11-25 17:20:28 -08:00
terencechain
1b6b52fda1 Add PayloadAttribute superset and use it for engine-api (#11691) 2022-11-25 17:12:55 -08:00
Potuz
5fa1fd84b9 Hook the BLSTOExecution Pool to the proposer 2022-11-25 09:33:17 -03:00
nisdas
bd0c9f9e8d fix 2022-11-25 09:06:59 -03:00
Potuz
2532bb370c Merge branch 'develop' into capella 2022-11-25 08:16:32 -03:00
nisdas
12efc6c2c1 make it reject 2022-11-24 22:21:12 +08:00
nisdas
a6cc9ac9c5 add sig validation 2022-11-24 22:19:55 +08:00
nisdas
031f5845a2 add gossip handler for bls change object 2022-11-24 21:22:18 +08:00
nisdas
b88559726c Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into capella 2022-11-24 20:05:41 +08:00
terence tsao
ca6ddf4490 Add and use send blocks and sidecars requests 2022-11-23 16:11:06 -08:00
terence tsao
3ebb2fce94 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-23 14:21:45 -08:00
nisdas
62f6b07cba fix gossip registration 2022-11-23 20:10:44 +08:00
terence tsao
f956f1ed6e Handle capella version for packing atts 2022-11-22 17:21:21 -08:00
terence tsao
1c0fa95053 Fix forkchoice test 2022-11-22 17:09:34 -08:00
terence tsao
04bf4a1060 Clean up 2022-11-22 15:38:24 -08:00
terence tsao
ae276fd371 Add mainnet spec tests 2022-11-22 10:53:47 -08:00
terence tsao
104bdaed12 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-22 09:43:43 -08:00
terence tsao
089a5d6ac2 Migrate from geth's kzg lib to go-kzg/eth (Thanks @roberto-bayardo) 2022-11-22 08:51:45 -08:00
Potuz
16b0820193 Merge branch 'develop' into capella 2022-11-22 13:43:03 -03:00
Potuz
4b02267e96 add more minimal fixes 2022-11-22 13:34:33 -03:00
Potuz
746584c453 fix missing minimal test 2022-11-22 13:34:33 -03:00
Potuz
b56daaaca2 Fix empty withdrawals slice 2022-11-22 10:38:42 -03:00
terence tsao
b7a6fe88ee Update block and sidecar gossip conditions 2022-11-21 14:38:44 -08:00
terence tsao
22d1c37b92 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-20 19:22:30 -08:00
terence tsao
78a393f825 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-18 13:48:40 -08:00
terence tsao
ac8290c1bf Port over shared kzg functions and updated trusted setup 2022-11-18 10:55:35 -08:00
terence tsao
5d0662b415 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-18 10:00:32 -08:00
terence tsao
931e5e10c3 Fix mainnet fork transition tests 2022-11-16 08:49:06 -05:00
Potuz
c172f838b1 Mark capella fields as dirty 2022-11-15 09:58:34 -05:00
Potuz
c07ae29cd9 move MaxWithdrawalsPerPayload to fieldparams 2022-11-14 22:59:31 -05:00
Potuz
214c9bfd8b fix bls_to_execution_changes 2022-11-14 16:41:17 -05:00
Potuz
716140d64d add bls_to_execution_change tests 2022-11-14 16:26:39 -05:00
Potuz
088cb4ef59 fix expected_withdrawals 2022-11-14 14:50:41 -05:00
terence tsao
fa33e93a8e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-14 08:53:15 -05:00
Potuz
d1472fc351 Add withdrawals operations tests 2022-11-13 08:54:59 -03:00
terence tsao
5c8c0c31d8 Merge branch 'capella-withdrawal-minimal' into capella 2022-11-12 22:06:43 -08:00
terence tsao
7f3c00c7a2 Can build 2022-11-12 22:06:22 -08:00
terencechain
c180dab791 Merge branch 'develop' into capella 2022-11-12 18:28:18 -08:00
terence tsao
f24acc21c7 Fix bazel 2022-11-12 17:43:19 -08:00
terence tsao
40b637849d Fix miminal capella tests 2022-11-12 17:26:05 -08:00
terence tsao
e7db1685df Add mainnet capella tests 2022-11-12 17:13:26 -08:00
terence tsao
eccbfd1011 Add shared capella spec tests helpers 2022-11-12 17:13:16 -08:00
terence tsao
90211f6769 Fix prev epoch attested precompute 2022-11-12 17:12:29 -08:00
terence tsao
edc32ac18e Fix slashing quotient 2022-11-12 17:12:04 -08:00
terence tsao
fe68e020e3 Add selector with minimal withdrawal size 2022-11-12 16:18:16 -08:00
terence tsao
81e1e3544d Add mainnet ssz vectors 2022-11-12 15:50:07 -08:00
Potuz
09372d5c35 Revert "added mainnet ssz tests"
This reverts commit 078a89e4ca.
2022-11-12 18:40:28 -03:00
Potuz
078a89e4ca added mainnet ssz tests 2022-11-12 18:39:11 -03:00
Potuz
dbc6ae26a6 Add minimal support for capella spec tests
Fixed many issues about hashing
Added fork typing in the state replayer
2022-11-12 18:18:35 -03:00
Potuz
b6f429867a Merge branch 'develop' into capella 2022-11-12 16:35:20 -03:00
Potuz
09f50660ce Merge branch 'develop' into capella 2022-11-12 11:51:06 -03:00
Potuz
189825b495 fix withdrawal hashing 2022-11-11 23:21:41 -03:00
terence tsao
441cad58d4 Merge commit 'e03de47db7b782bdf7dc8d9b42749eb2a236cdea' into eip4844 2022-11-11 09:18:57 -08:00
terence tsao
1277d08f9e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-11 08:46:39 -08:00
terence tsao
e03de47db7 Ops, wrong branch 2022-11-11 08:45:22 -08:00
Potuz
764b7ff610 Don't build capella payload twice 2022-11-11 10:47:29 -03:00
terence tsao
307be7694e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-09 16:44:34 -08:00
terence tsao
c76ae1ef39 Add beacon_block_and_blobs_sidecar gossip 2022-11-09 16:43:26 -08:00
Potuz
d499db7f0e Merge branch 'develop' into capella 2022-11-09 21:27:18 -03:00
terence tsao
a894b9f29a Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-09 14:49:18 -08:00
terence tsao
902e6b3905 Update to use latest kzg library 2022-11-09 10:17:27 -08:00
Potuz
ed2d1c7bf9 Merge branch 'develop' into capella 2022-11-09 09:14:55 -03:00
nisdas
14b73cbd47 register flag 2022-11-09 08:48:36 +08:00
terence tsao
a39c7aa864 Add rpc blobs sidecars by range 2022-11-07 14:53:12 -08:00
terence tsao
170bc9c8ec Network changes 2022-11-07 14:43:46 -08:00
terence tsao
365c01fc29 Add tests 2022-11-07 06:58:24 -08:00
Potuz
3124785a08 Merge branch 'develop' into capella 2022-11-07 10:08:09 -03:00
Potuz
60e6306107 working withrawals initial commits 2022-11-06 16:53:57 -03:00
terence tsao
42ccb7830a Add Capella DB changes 2022-11-06 15:25:34 -03:00
Potuz
0bb03b9292 fix marshalling and engine calls 2022-11-06 15:24:22 -03:00
nisdas
ed6fbf1480 stupid bug 2022-11-07 00:18:01 +08:00
nisdas
477cec6021 wei it 2022-11-07 00:13:12 +08:00
nisdas
924500d111 add unmarshal 2022-11-06 23:57:37 +08:00
Potuz
0677504ef1 Revert "proposer changes"
This reverts commit ca2a7c4d9c.
2022-11-06 12:16:53 -03:00
Potuz
ca2a7c4d9c proposer changes 2022-11-06 12:14:17 -03:00
Potuz
28606629ad marshalling stub 2022-11-06 12:03:25 -03:00
Potuz
c817279464 fix capella payload 2022-11-06 11:14:39 -03:00
Potuz
009d6ed8ed proposer logic 2022-11-06 10:49:32 -03:00
Potuz
5cec1282a9 FCU two versions 2022-11-06 10:22:45 -03:00
Potuz
340170fd29 propose block V2 2022-11-06 09:42:20 -03:00
Potuz
7ed0cc139a marshalling first attempt 2022-11-06 07:47:43 -03:00
Potuz
2c822213eb rpc changes 2022-11-06 00:24:56 -03:00
terence tsao
0894b9591c Add Capella DB changes 2022-11-05 13:27:18 -07:00
terence tsao
f0ca45f9a2 Json marshal and unmarshal blob bundle 2022-11-05 12:45:14 -07:00
terence tsao
afc48c6485 State change and config changes for Capella 2022-11-05 12:45:01 -07:00
terence tsao
93dce8a0cb P2p changes for Capella fork 2022-11-05 12:44:46 -07:00
terence tsao
149ccdaf39 Add engine call for get payload 2022-11-05 11:00:13 -07:00
Potuz
c08bb39ffe add fork versions 2022-11-05 13:38:11 -03:00
Potuz
5083d8ab34 propose capella blocks 2022-11-05 12:38:27 -03:00
Potuz
7552a5dd07 capella fork logic 2022-11-05 07:33:20 -03:00
Potuz
c93d68f853 Capella state transition 2022-11-05 06:06:15 -03:00
terence tsao
2b74db2dce Add blob database methods 2022-11-04 13:46:01 -07:00
terence tsao
cc6c91415d Add validate blobs sidecar 2022-11-04 13:06:15 -07:00
terence tsao
6d7d7e0adc Add excessive blobs to execution payload 2022-11-04 09:48:39 -07:00
terence tsao
2105d777f0 Add blobs kzg to Capella beacon block 2022-11-04 09:33:01 -07:00
terence tsao
14338afbdb Update go.mod 2022-11-04 08:52:16 -07:00
Potuz
3e8aa4023d Fix config test and export method 2022-11-04 11:59:49 -03:00
Potuz
b443875e66 Implement get_expected_withdrawals 2022-11-04 11:45:45 -03:00
298 changed files with 27079 additions and 5497 deletions

View File

@@ -45,6 +45,7 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",

View File

@@ -301,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

View File

@@ -60,7 +60,20 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
if b.Version() == version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
}
if b.Version() == version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
log.Info("Finished applying state transition")
return nil
@@ -96,6 +109,7 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"version": version.String(block.Version()),
}).Info("Synced new block")
}
return nil

View File

@@ -68,6 +68,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
log.Errorf("requesting blockroot %#x", bytesutil.Trunc(parentRoot[:]))
return errors.New("could not reconstruct parent state")
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
@@ -48,6 +49,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
interop.WriteBlockToDisk("state_transition", blockCopy, true /*failed*/)
return err
}

View File

@@ -6,6 +6,7 @@ go_library(
"active_balance.go",
"active_balance_disabled.go", # keep
"attestation_data.go",
"blobs.go",
"checkpoint_state.go",
"committee.go",
"committee_disabled.go", # keep
@@ -40,6 +41,7 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",

beacon-chain/cache/blobs.go (new file, 51 lines, vendored)
View File

@@ -0,0 +1,51 @@
package cache
import (
"sync"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
)
var (
// blobsCacheMiss tracks the number of blobs requests that aren't present in the cache.
blobsCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_blobs_cache_miss",
Help: "The number of get requests that aren't present in the cache.",
})
errNoSidecar = errors.New("no sidecar")
)
// BlobsCache caches the blobs for the proposer through its life cycle.
type BlobsCache struct {
cache map[types.Slot][]*v1.Blob
sync.Mutex
}
func NewBlobsCache() *BlobsCache {
return &BlobsCache{
cache: map[types.Slot][]*v1.Blob{},
}
}
func (b *BlobsCache) Get(slot types.Slot) ([]*v1.Blob, error) {
b.Lock()
defer b.Unlock()
sc, ok := b.cache[slot]
if !ok {
blobsCacheMiss.Inc()
return nil, errNoSidecar
}
delete(b.cache, slot)
return sc, nil
}
func (b *BlobsCache) Put(slot types.Slot, blobs []*v1.Blob) {
b.Lock()
defer b.Unlock()
b.cache[slot] = blobs
}
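A minimal usage sketch for this cache (editor's illustration, not part of the diff; the cache package import path is assumed from the BUILD target above, the other paths are taken from the file's own imports). Note that Get consumes the entry it returns, so a slot's blobs are handed to the proposer exactly once:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
)

func main() {
	c := cache.NewBlobsCache()
	// Store the blobs produced for slot 42.
	c.Put(types.Slot(42), []*v1.Blob{{}})
	// The first Get returns the blobs and deletes the entry.
	if blobs, err := c.Get(types.Slot(42)); err == nil {
		fmt.Println("got", len(blobs), "blob(s)")
	}
	// A second Get misses because the entry was consumed above.
	if _, err := c.Get(types.Slot(42)); err != nil {
		fmt.Println("cache miss:", err)
	}
}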

View File

@@ -133,6 +133,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
return err
}
if !bytes.Equal(payload.ParentHash(), header.BlockHash()) {
log.Errorf("parent Hash %#x, header Hash: %#x", bytesutil.Trunc(payload.ParentHash()), bytesutil.Trunc(header.BlockHash()))
return ErrInvalidPayloadBlockHash
}
return nil

View File

@@ -9,6 +9,110 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
// UpgradeToDeneb upgrades a generic input state and returns the Deneb-version state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: make([]byte, 32),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
}
return state_native.InitializeFromProtoUnsafeDeneb(s)
}
// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

View File

@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}
// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1

View File

@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot types.Slot) bool {
return epochStart && capellaEpoch
}
// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == EIP4844_FORK_EPOCH
func CanUpgradeToDeneb(slot types.Slot) bool {
epochStart := slots.IsEpochStart(slot)
denebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
return epochStart && denebEpoch
}
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

View File

@@ -23,7 +23,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -46,6 +45,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_protolambda_go_kzg//eth:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],

View File

@@ -17,6 +17,7 @@ go_library(
"//config/features:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -8,15 +8,16 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/io/file"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
// WriteBlockToDisk as a block ssz. Writes to temp directory. Debug!
func WriteBlockToDisk(block interfaces.SignedBeaconBlock, failed bool) {
func WriteBlockToDisk(prefix string, block interfaces.SignedBeaconBlock, failed bool) {
if !features.Get().WriteSSZStateTransitions {
return
}
filename := fmt.Sprintf("beacon_block_%d.ssz", block.Block().Slot())
filename := fmt.Sprintf(prefix+"_beacon_block_%d.ssz", block.Block().Slot())
if failed {
filename = "failed_" + filename
}
@@ -31,3 +32,21 @@ func WriteBlockToDisk(block interfaces.SignedBeaconBlock, failed bool) {
log.WithError(err).Error("Failed to write to disk")
}
}
// WriteBadBlobsToDisk writes a blobs sidecar as ssz. Writes to temp directory. Debug!
func WriteBadBlobsToDisk(prefix string, sideCar *eth.BlobsSidecar) {
if !features.Get().WriteSSZStateTransitions {
return
}
filename := fmt.Sprintf(prefix+"_blobs_%d.ssz", sideCar.BeaconBlockSlot)
fp := path.Join(os.TempDir(), filename)
log.Warnf("Writing blobs to disk at %s", fp)
enc, err := sideCar.MarshalSSZ()
if err != nil {
log.WithError(err).Error("Failed to ssz encode blobs")
return
}
if err := file.WriteFile(fp, enc); err != nil {
log.WithError(err).Error("Failed to write to disk")
}
}

View File

@@ -302,6 +302,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
return nil, err
}
}
if time.CanUpgradeToDeneb(state.Slot()) {
state, err = capella.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
}
if highestSlot < state.Slot() {

View File

@@ -6,14 +6,15 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/protolambda/go-kzg/eth"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
b "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"go.opencensus.io/trace"
@@ -57,9 +58,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
defer span.End()
var err error
interop.WriteBlockToDisk(signed, false /* Has the block failed */)
interop.WriteStateToDisk(st)
parentRoot := signed.Block().ParentRoot()
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
if err != nil {
@@ -256,7 +254,7 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
case version.Altair, version.Bellatrix, version.Capella:
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
state, err = altairOperations(ctx, state, signedBeaconBlock)
if err != nil {
return nil, err
@@ -356,9 +354,48 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
if signed.Block().Version() == version.Deneb {
err := ValidateBlobKzgs(ctx, signed.Block().Body())
if err != nil {
return nil, errors.Wrap(err, "could not validate blob kzgs")
}
}
return state, nil
}
// ValidateBlobKzgs validates the blob kzgs in the beacon block.
//
// Spec code:
// def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
//
// assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
func ValidateBlobKzgs(ctx context.Context, body interfaces.BeaconBlockBody) error {
_, span := trace.StartSpan(ctx, "core.state.ValidateBlobKzgs")
defer span.End()
payload, err := body.Execution()
if err != nil {
return errors.Wrap(err, "could not get execution payload from block")
}
blkKzgs, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get blob kzg commitments from block")
}
kzgs := make(eth.KZGCommitmentSequenceImpl, len(blkKzgs))
for i := range blkKzgs {
kzgs[i] = bytesutil.ToBytes48(blkKzgs[i])
}
txs, err := payload.Transactions()
if err != nil {
return errors.Wrap(err, "could not get transactions from payload")
}
if err := eth.VerifyKZGCommitmentsAgainstTransactions(txs, kzgs); err != nil {
return err
}
return nil
}
// This calls altair block operations.
func altairOperations(
ctx context.Context,

View File

@@ -54,6 +54,9 @@ type ReadOnlyDatabase interface {
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Blobs related methods.
BlobsSidecar(ctx context.Context, blockRoot [32]byte) (*ethpb.BlobsSidecar, error)
BlobsSidecarsBySlot(ctx context.Context, slot types.Slot) ([]*ethpb.BlobsSidecar, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -68,6 +71,9 @@ type NoHeadAccessDatabase interface {
SaveBlock(ctx context.Context, block interfaces.SignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeaconBlock) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Blob related methods.
SaveBlobsSidecar(ctx context.Context, blob *ethpb.BlobsSidecar) error
DeleteBlobsSidecar(ctx context.Context, blockRoot [32]byte) error
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
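A hedged usage sketch for the new blob methods (editor's illustration, not part of the diff). The interface below is declared locally and only restates the signatures added above, so any store in this diff, such as the kv implementation further down, satisfies it:

package example

import (
	"context"
	"errors"

	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// blobsStore restates just the blob-related methods added to the database interfaces.
type blobsStore interface {
	SaveBlobsSidecar(ctx context.Context, blob *ethpb.BlobsSidecar) error
	BlobsSidecar(ctx context.Context, blockRoot [32]byte) (*ethpb.BlobsSidecar, error)
	BlobsSidecarsBySlot(ctx context.Context, slot types.Slot) ([]*ethpb.BlobsSidecar, error)
	DeleteBlobsSidecar(ctx context.Context, blockRoot [32]byte) error
}

// roundTripSidecar saves a sidecar, reads it back by beacon block root, then deletes it.
func roundTripSidecar(ctx context.Context, db blobsStore, root [32]byte, sc *ethpb.BlobsSidecar) error {
	if err := db.SaveBlobsSidecar(ctx, sc); err != nil {
		return err
	}
	got, err := db.BlobsSidecar(ctx, root)
	if err != nil {
		return err
	}
	// The kv implementation below returns (nil, nil) when no sidecar matches the root.
	if got == nil {
		return errors.New("sidecar not found")
	}
	return db.DeleteBlobsSidecar(ctx, root)
}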

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"archived_point.go",
"backup.go",
"blobs.go",
"blocks.go",
"checkpoint.go",
"deposit_contract.go",

beacon-chain/db/kv/blobs.go (new file, 172 lines)
View File

@@ -0,0 +1,172 @@
package kv
import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
const blobSidecarKeyLength = 48 // slot_to_rotating_buffer(blob.slot) ++ blob.slot ++ blob.block_root
// SaveBlobsSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
//
// firstElemKey = getFirstElement(bucket)
// shouldOverwrite = blob.slot > bytes_to_slot(firstElemKey[8:16])
// if shouldOverwrite:
// for existingKey := seek prefix bytes(slot_to_rotating_buffer(blob.slot))
// bucket.delete(existingKey)
// bucket.put(key, blob)
func (s *Store) SaveBlobsSidecar(ctx context.Context, blobSidecar *ethpb.BlobsSidecar) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobsSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
encodedBlobSidecar, err := encode(ctx, blobSidecar)
if err != nil {
return err
}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
key := blobSidecarKey(blobSidecar)
rotatingBufferPrefix := key[0:8]
var firstElementKey []byte
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if len(k) != 0 {
firstElementKey = k
break
}
}
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
// store the blob by key and exit early.
if len(firstElementKey) == 0 {
return bkt.Put(key, encodedBlobSidecar)
} else if len(firstElementKey) != len(key) {
return fmt.Errorf(
"key length %d (%#x) != existing key length %d (%#x)",
len(key),
key,
len(firstElementKey),
firstElementKey,
)
}
slotOfFirstElement := firstElementKey[8:16]
// If we should overwrite old blobs at the spot in the rotating buffer, we clear data at that spot.
shouldOverwrite := blobSidecar.BeaconBlockSlot > bytesutil.BytesToSlotBigEndian(slotOfFirstElement)
if shouldOverwrite {
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if err := bkt.Delete(k); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", k)
}
}
}
return bkt.Put(key, encodedBlobSidecar)
})
}
// BlobsSidecar retrieves the blobs given a beacon block root.
func (s *Store) BlobsSidecar(ctx context.Context, beaconBlockRoot [32]byte) (*ethpb.BlobsSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobsSidecar")
defer span.End()
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if len(enc) == 0 {
return nil, nil
}
blob := &ethpb.BlobsSidecar{}
if err := decode(ctx, enc, blob); err != nil {
return nil, err
}
return blob, nil
}
// BlobsSidecarsBySlot retrieves sidecars from a slot.
func (s *Store) BlobsSidecarsBySlot(ctx context.Context, slot types.Slot) ([]*ethpb.BlobsSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobsSidecarsBySlot")
defer span.End()
encodedItems := make([][]byte, 0)
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if len(k) != blobSidecarKeyLength {
continue
}
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
if slotInKey == slot {
encodedItems = append(encodedItems, v)
}
}
return nil
}); err != nil {
return nil, err
}
sidecars := make([]*ethpb.BlobsSidecar, len(encodedItems))
if len(encodedItems) == 0 {
return sidecars, nil
}
for i, enc := range encodedItems {
blob := &ethpb.BlobsSidecar{}
if err := decode(ctx, enc, blob); err != nil {
return nil, err
}
sidecars[i] = blob
}
return sidecars, nil
}
// DeleteBlobsSidecar deletes the blobs sidecars stored under the given beacon block root.
func (s *Store) DeleteBlobsSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobsSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
if err := bkt.Delete(k); err != nil {
return err
}
}
}
return nil
})
}
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobsSidecar) []byte {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
slotInRotatingBuffer := blob.BeaconBlockSlot.ModSlot(maxSlotsToPersistBlobs)
key := bytesutil.SlotToBytesBigEndian(slotInRotatingBuffer)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.BeaconBlockSlot)...)
key = append(key, blob.BeaconBlockRoot...)
return key
}
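For reference, a hypothetical inverse helper (editor's sketch, not part of the diff) that splits a sidecar key back into the three documented components; it assumes it lives in the same kv package so that blobSidecarKeyLength is in scope:

package kv

import (
	"fmt"

	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
)

// splitBlobSidecarKey is the hypothetical inverse of blobSidecarKey:
// key[0:8] is slot_to_rotating_buffer(blob.slot), key[8:16] is blob.slot,
// and key[16:48] is the beacon block root, slots encoded big endian.
func splitBlobSidecarKey(key []byte) (rotating, slot types.Slot, root []byte, err error) {
	if len(key) != blobSidecarKeyLength {
		return 0, 0, nil, fmt.Errorf("unexpected key length %d, want %d", len(key), blobSidecarKeyLength)
	}
	rotating = bytesutil.BytesToSlotBigEndian(key[0:8])
	slot = bytesutil.BytesToSlotBigEndian(key[8:16])
	root = key[16:48]
	return rotating, slot, root, nil
}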

View File

@@ -241,6 +241,10 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
return err
}
if err := s.DeleteBlobsSidecar(ctx, root); err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
@@ -799,6 +803,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
}
case hasDenebKey(enc):
rawBlock = &ethpb.SignedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, err
}
case hasDenebBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -838,6 +852,11 @@ func marshalBlock(_ context.Context, blk interfaces.SignedBeaconBlock) ([]byte,
}
}
switch blockToSave.Version() {
case version.Deneb:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
}
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil

View File

@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}
func hasDenebKey(enc []byte) bool {
if len(denebKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebKey)], denebKey)
}
func hasDenebBlindKey(enc []byte) bool {
if len(denebBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}

View File

@@ -128,6 +128,8 @@ var Buckets = [][]byte{
feeRecipientBucket,
registrationBucket,
blobsBucket,
}
// NewKVStore initializes a new boltDB key-value store at the directory

View File

@@ -46,6 +46,7 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -54,6 +55,8 @@ var (
bellatrixBlindKey = []byte("blind-bellatrix")
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")

View File

@@ -473,6 +473,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}
switch {
case hasDenebKey(enc):
protoState := &ethpb.BeaconStateDeneb{}
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -580,6 +593,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}

View File

@@ -34,6 +34,7 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
// NewPayloadMethodV3 v3 request string for JSON-RPC.
NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -42,6 +43,9 @@ const (
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
// GetPayloadMethodV3 v3 request string for JSON-RPC.
GetPayloadMethodV3 = "engine_getPayloadV3"
// GetBlobsBundleMethod v1 request string for JSON-RPC.
GetBlobsBundleMethod = "engine_getBlobsBundleV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -84,6 +88,7 @@ type EngineCaller interface {
) error
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error)
}
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
@@ -121,6 +126,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
@@ -168,7 +182,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella:
case version.Capella, version.Deneb:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -210,6 +224,15 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot prysmT
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
result := &pb.ExecutionPayloadDeneb{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
}
return blocks.WrappedExecutionPayloadDeneb(result)
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapella{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
@@ -419,6 +442,19 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
return execBlks, nil
}
// GetBlobsBundle calls the engine_getBlobsBundleV1 method via JSON-RPC.
func (s *Service) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsBundle")
defer span.End()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.BlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetBlobsBundleMethod, pb.PayloadIDBytes(payloadId))
return result, handleRPCError(err)
}
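// Editor's sketch (not part of this diff): a proposer-side caller is expected to
// request the blobs bundle with the same payload ID it passed to GetPayload, since
// the execution client prepares the payload and its blobs together. The interface
// below is hypothetical and only restates the method added above.
type blobsBundleFetcher interface {
	GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error)
}

func fetchBlobsForPayload(ctx context.Context, engine blobsBundleFetcher, payloadId [8]byte) (*pb.BlobsBundle, error) {
	// engine_getBlobsBundleV1 is queried with the payload ID previously returned
	// by forkchoiceUpdated and used for engine_getPayload.
	return engine.GetBlobsBundle(ctx, payloadId)
}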
// HeaderByHash returns the relevant header details for the provided block hash.
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
var hdr *types.HeaderInfo

View File

@@ -148,7 +148,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
arg := common.BytesToHash([]byte("foo"))
resp, err := srv.ExecutionBlockByHash(ctx, arg, true /* with txs */)
resp, err := srv.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
@@ -621,7 +621,7 @@ func TestClient_HTTP(t *testing.T) {
service.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := service.ExecutionBlockByHash(ctx, arg, true /* with txs */)
resp, err := service.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})

View File

@@ -479,7 +479,7 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) {
bHash, err := s.BlockHashByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash")
return [32]byte{}, 0, nil
}
if bHash == [32]byte{} {
return [32]byte{}, 0, errors.Wrap(err, "got empty block hash")

View File

@@ -3,6 +3,7 @@ package execution
import (
"context"
"fmt"
"net/http"
"net/url"
"strings"
"time"
@@ -14,7 +15,6 @@ import (
contracts "github.com/prysmaticlabs/prysm/v3/contracts/deposit"
"github.com/prysmaticlabs/prysm/v3/io/logs"
"github.com/prysmaticlabs/prysm/v3/network"
"github.com/prysmaticlabs/prysm/v3/network/authorization"
)
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
@@ -34,7 +34,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
}
s.depositContractCaller = depositContractCaller
// Ensure we have the correct chain and deposit IDs.
//Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
errStr := err.Error()
@@ -113,9 +113,21 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
if err != nil {
return nil, err
}
headers := http.Header{}
for _, h := range s.cfg.headers {
if h == "" {
continue
}
keyValue := strings.Split(h, "=")
if len(keyValue) < 2 {
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
continue
}
headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()), gethRPC.WithHeaders(headers))
if err != nil {
return nil, err
}
@@ -127,13 +139,6 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, err
}
client.SetHeader("Authorization", header)
}
for _, h := range s.cfg.headers {
if h != "" {
keyValue := strings.Split(h, "=")
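The header-parsing loop added above splits each configured header flag on "=" and rejoins everything after the first separator, so values that themselves contain "=" (for example base64 padding in an Authorization header) survive intact. A runnable sketch of the same parsing, with the flag values invented for illustration:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical flag values: the key is everything before the first "=",
	// the value keeps any further "=" characters.
	flagValues := []string{
		"Authorization=Basic dXNlcjpwYXNzd29yZA==",
		"X-Custom=alpha=beta",
		"malformed-entry", // skipped: no "="
		"",                // skipped: empty
	}

	headers := http.Header{}
	for _, h := range flagValues {
		if h == "" {
			continue
		}
		keyValue := strings.Split(h, "=")
		if len(keyValue) < 2 {
			fmt.Printf("skipping malformed header flag %q\n", keyValue[0])
			continue
		}
		headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
	}
	fmt.Println(headers.Get("Authorization")) // Basic dXNlcjpwYXNzd29yZA==
	fmt.Println(headers.Get("X-Custom"))      // alpha=beta
}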

View File

@@ -38,6 +38,7 @@ type EngineClient struct {
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlobsBundle *pb.BlobsBundle
}
// NewPayload --
@@ -171,3 +172,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
blk = parentBlk
}
}
// GetBlobsBundle --
func (e *EngineClient) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
return e.BlobsBundle, nil
}

View File

@@ -17,7 +17,8 @@ func (s *Service) forkWatcher() {
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch {
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. This repeats over the
// epoch, which might be slightly wasteful

View File

@@ -119,6 +119,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultProposerSlashingTopicParams(), nil
case strings.Contains(topic, GossipAttesterSlashingMessage):
return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlockAndBlobsMessage):
// TODO(Deneb): Using the default block scoring for now; this should be updated.
return defaultBlockTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
return defaultBlsToExecutionChangeTopicParams(), nil
default:

View File

@@ -20,6 +20,7 @@ var gossipTopicMappings = map[string]proto.Message{
AggregateAndProofSubnetTopicFormat: &ethpb.SignedAggregateAttestationAndProof{},
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlockAndBlobsSubnetTopicFormat: &ethpb.SignedBeaconBlockAndBlobsSidecar{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
}
@@ -27,6 +28,9 @@ var gossipTopicMappings = map[string]proto.Message{
// versioned by epoch.
func GossipTopicMappings(topic string, epoch types.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat {
if epoch >= params.BeaconConfig().DenebForkEpoch {
return &ethpb.SignedBeaconBlockAndBlobsSidecar{}
}
if epoch >= params.BeaconConfig().CapellaForkEpoch {
return &ethpb.SignedBeaconBlockCapella{}
}
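After this change the block gossip topic resolves to the combined block-and-blobs-sidecar type once the Deneb epoch is reached, falling back through Capella, Bellatrix, and Altair otherwise. A simplified sketch of that newest-fork-first dispatch; the fork epochs and message names here are placeholders, not mainnet values:

package main

import "fmt"

type epoch uint64

// Hypothetical fork epochs; real values come from the beacon config.
const (
	altairForkEpoch    epoch = 74240
	bellatrixForkEpoch epoch = 144896
	capellaForkEpoch   epoch = 194048
	denebForkEpoch     epoch = 300000
)

// blockMessageForEpoch mirrors the dispatch above: the newest fork whose
// epoch has been reached decides the message type for the block topic.
func blockMessageForEpoch(e epoch) string {
	switch {
	case e >= denebForkEpoch:
		return "SignedBeaconBlockAndBlobsSidecar"
	case e >= capellaForkEpoch:
		return "SignedBeaconBlockCapella"
	case e >= bellatrixForkEpoch:
		return "SignedBeaconBlockBellatrix"
	case e >= altairForkEpoch:
		return "SignedBeaconBlockAltair"
	default:
		return "SignedBeaconBlock"
	}
}

func main() {
	fmt.Println(blockMessageForEpoch(200000)) // SignedBeaconBlockCapella
	fmt.Println(blockMessageForEpoch(300001)) // SignedBeaconBlockAndBlobsSidecar
}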

View File

@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
default:
return false
}
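CanSubscribe accepts a topic only if its digest segment matches the hex encoding of one of the known fork digests, which now includes Deneb. A self-contained sketch of that comparison using made-up 4-byte digests in place of the real ones derived from the fork version and genesis validators root:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical 4-byte fork digests.
	digests := [][4]byte{
		{0x00, 0x00, 0x10, 0x20}, // phase0
		{0x01, 0x00, 0x10, 0x20}, // altair
		{0x04, 0x00, 0x10, 0x20}, // deneb
	}

	topic := "/eth2/04001020/beacon_block_and_blobs_sidecar/ssz_snappy"
	parts := strings.Split(topic, "/")
	if len(parts) < 3 {
		fmt.Println("malformed topic")
		return
	}

	known := false
	for _, d := range digests {
		// Same comparison as CanSubscribe: hex-encode each digest and
		// match it against the digest segment of the topic.
		if parts[2] == fmt.Sprintf("%x", d) {
			known = true
			break
		}
	}
	fmt.Println("known fork digest:", known) // true
}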

View File

@@ -37,6 +37,12 @@ const PingMessageName = "/ping"
// MetadataMessageName specifies the name for the metadata message topic.
const MetadataMessageName = "/metadata"
// BlobsSidecarsByRangeMessageName specifies the name for the blobs sidecars by range message topic.
const BlobsSidecarsByRangeMessageName = "/blobs_sidecars_by_range"
// BeaconBlockAndBlobsSidecarByRootName is a topic for fetching beacon blocks and sidecar blobs by root.
const BeaconBlockAndBlobsSidecarByRootName = "/beacon_block_and_blobs_sidecar_by_root"
const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -51,6 +57,10 @@ const (
RPCPingTopicV1 = protocolPrefix + PingMessageName + SchemaVersionV1
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
// RPCBlobsSidecarsByRangeTopicV1 defines the v1 topic for the blobs sidecars by range rpc method.
RPCBlobsSidecarsByRangeTopicV1 = protocolPrefix + BlobsSidecarsByRangeMessageName + SchemaVersionV1
// RPCBeaconBlockAndBlobsSidecarByRootTopicV1 defines the v1 topic for the beacon block and blobs sidecar by root rpc method.
RPCBeaconBlockAndBlobsSidecarByRootTopicV1 = protocolPrefix + BeaconBlockAndBlobsSidecarByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
@@ -83,6 +93,9 @@ var RPCTopicMappings = map[string]interface{}{
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
// RPC Blobs Sidecars By Range Message
RPCBlobsSidecarsByRangeTopicV1: new(pb.BlobsSidecarsByRangeRequest),
RPCBeaconBlockAndBlobsSidecarByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
}
// Maps all registered protocol prefixes.
@@ -93,12 +106,14 @@ var protocolMapping = map[string]bool{
// Maps all the protocol message names for the different rpc
// topics.
var messageMapping = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
BlobsSidecarsByRangeMessageName: true,
BeaconBlockAndBlobsSidecarByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.
@@ -113,6 +128,17 @@ var versionMapping = map[string]bool{
SchemaVersionV2: true,
}
var PreAltairV1SchemaMapping = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
BlobsSidecarsByRangeMessageName: false,
BeaconBlockAndBlobsSidecarByRootName: false,
}
// VerifyTopicMapping verifies that the topic and its accompanying
// message type is correct.
func VerifyTopicMapping(topic string, msg interface{}) error {
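Every req/resp topic is assembled from the protocol prefix, a message name, and a schema version, which is why the two new blob topics only need their message-name constants registered in the maps above. A sketch of that assembly; the prefix value is assumed here for illustration rather than taken from the real constants:

package main

import (
	"fmt"
	"strings"
)

const (
	// protocolPrefix is assumed here; the real value comes from the spec constants.
	protocolPrefix                  = "/eth2/beacon_chain/req"
	blobsSidecarsByRangeMessageName = "/blobs_sidecars_by_range"
	schemaVersionV1                 = "/1"
)

func main() {
	// Concatenate prefix + message name + schema version, exactly as the
	// RPC*TopicV1 constants above are built.
	topic := protocolPrefix + blobsSidecarsByRangeMessageName + schemaVersionV1
	fmt.Println(topic) // /eth2/beacon_chain/req/blobs_sidecars_by_range/1

	// A topic is valid when it decomposes back into a known prefix,
	// message name, and schema version.
	fmt.Println(strings.HasPrefix(topic, protocolPrefix),
		strings.HasSuffix(topic, schemaVersionV1)) // true true
}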

View File

@@ -26,6 +26,8 @@ const (
GossipAggregateAndProofMessage = "beacon_aggregate_and_proof"
// GossipContributionAndProofMessage is the name for the sync contribution and proof message type.
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
// GossipBlockAndBlobsMessage is the name for the block and blobs sidecar message type. (Deneb)
GossipBlockAndBlobsMessage = "beacon_block_and_blobs_sidecar"
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
@@ -47,6 +49,8 @@ const (
AggregateAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipAggregateAndProofMessage
// SyncContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
// BlockAndBlobsSubnetTopicFormat is the topic format for the block and blobs.
BlockAndBlobsSubnetTopicFormat = GossipProtocolAndDigest + GossipBlockAndBlobsMessage
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
)

View File

@@ -53,6 +53,12 @@ func InitializeDataMaps() {
&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}},
)
},
// TODO(Deneb): Is this right?
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.SignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}},
)
},
}
// Reset our metadata map.
@@ -69,5 +75,8 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
}
}

View File

@@ -211,6 +211,14 @@ func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.E
return true
}
func handleGetBlobsSidecarSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "blobs_sidecar.ssz",
responseJson: &SszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func sszRequested(req *http.Request) (bool, error) {
accept := req.Header.Values("Accept")
if len(accept) == 0 {

View File

@@ -503,6 +503,13 @@ type capellaBlockResponseJson struct {
Finalized bool `json:"finalized"`
}
type denebBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockDenebContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type bellatrixBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
@@ -517,6 +524,12 @@ type capellaBlindedBlockResponseJson struct {
Finalized bool `json:"finalized"`
}
type denebBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBlindedBeaconBlockDenebContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BlockV2ResponseJson)
if !ok {
@@ -565,6 +578,16 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockDenebContainerJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -624,6 +647,15 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockDenebContainerJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -655,6 +687,11 @@ type capellaStateResponseJson struct {
Data *BeaconStateCapellaJson `json:"data"`
}
type denebStateResponseJson struct {
Version string `json:"version"`
Data *BeaconStateDenebJson `json:"data"`
}
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BeaconStateV2ResponseJson)
if !ok {
@@ -683,6 +720,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
Version: respContainer.Version,
Data: respContainer.Data.CapellaState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebState,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
}
@@ -719,6 +761,40 @@ type bellatrixProduceBlindedBlockResponseJson struct {
Data *BlindedBeaconBlockBellatrixJson `json:"data"`
}
type tempBlobJson struct {
Data string `json:"data"`
}
type tempBlobsSidecarJson struct {
BeaconBlockRoot string `json:"beacon_block_root"`
BeaconBlockSlot string `json:"beacon_block_slot"`
Blobs []tempBlobJson `json:"blobs"`
AggregatedProof string `json:"kzg_aggregated_proof"`
}
// prepareBlobsResponse flattens the blobs list, exposing each blob's data field as the blob content itself in the JSON response.
func prepareBlobsResponse(body []byte, responseContainer interface{}) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
tempContainer := &tempBlobsSidecarJson{}
if err := json.Unmarshal(body, tempContainer); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal response into temp container")
}
container, ok := responseContainer.(*BlobsSidecarResponseJson)
if !ok {
return false, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}
container.Data = &BlobsSidecarJson{
BeaconBlockRoot: tempContainer.BeaconBlockRoot,
BeaconBlockSlot: tempContainer.BeaconBlockSlot,
Blobs: make([]string, len(tempContainer.Blobs)),
AggregatedProof: tempContainer.AggregatedProof,
}
for i, blob := range tempContainer.Blobs {
container.Data.Blobs[i] = blob.Data
}
return false, nil
}
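prepareBlobsResponse reshapes the upstream JSON, where each blob arrives as an object with a single data field, into a flat list of hex strings before the middleware deserializes it. A standalone sketch of the same transformation over an invented payload:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type tempBlob struct {
	Data string `json:"data"`
}

type tempBlobsSidecar struct {
	BeaconBlockRoot string     `json:"beacon_block_root"`
	BeaconBlockSlot string     `json:"beacon_block_slot"`
	Blobs           []tempBlob `json:"blobs"`
	AggregatedProof string     `json:"kzg_aggregated_proof"`
}

type blobsSidecar struct {
	BeaconBlockRoot string   `json:"beacon_block_root"`
	BeaconBlockSlot string   `json:"beacon_block_slot"`
	Blobs           []string `json:"blobs"`
	AggregatedProof string   `json:"kzg_aggregated_proof"`
}

func main() {
	// Hypothetical upstream response body.
	body := []byte(`{"beacon_block_root":"0xabc","beacon_block_slot":"42",
		"blobs":[{"data":"0x01"},{"data":"0x02"}],"kzg_aggregated_proof":"0xdd"}`)

	tmp := &tempBlobsSidecar{}
	if err := json.Unmarshal(body, tmp); err != nil {
		log.Fatal(err)
	}

	// Flatten: expose each blob's data field as the blob itself.
	out := &blobsSidecar{
		BeaconBlockRoot: tmp.BeaconBlockRoot,
		BeaconBlockSlot: tmp.BeaconBlockSlot,
		Blobs:           make([]string, len(tmp.Blobs)),
		AggregatedProof: tmp.AggregatedProof,
	}
	for i, b := range tmp.Blobs {
		out.Blobs[i] = b.Data
	}

	flat, err := json.Marshal(out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(flat)) // blobs is now ["0x01","0x02"]
}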
type capellaProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockCapellaJson `json:"data"`

View File

@@ -75,6 +75,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
"/eth/v1/validator/liveness/{epoch}",
"/eth/v1/beacon/blobs_sidecars/{block_id}",
}
}
@@ -136,6 +137,12 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZV2}
case " eth/v1/beacon/blobs_sidecars/{block_id}":
endpoint.GetResponse = &BlobsSidecarResponseJson{}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlobsSidecarSSZ}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeGrpcResponseBodyIntoContainer: prepareBlobsResponse,
}
case "/eth/v1/beacon/blocks/{block_id}/root":
endpoint.GetResponse = &BlockRootResponseJson{}
case "/eth/v1/beacon/blocks/{block_id}/attestations":

View File

@@ -385,6 +385,7 @@ type SignedBeaconBlockContainerV2Json struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}
@@ -393,6 +394,7 @@ type SignedBlindedBeaconBlockContainerJson struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}
@@ -401,6 +403,7 @@ type BeaconBlockContainerV2Json struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
}
type BlindedBeaconBlockContainerJson struct {
@@ -408,6 +411,7 @@ type BlindedBeaconBlockContainerJson struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
}
type SignedBeaconBlockAltairContainerJson struct {
@@ -425,6 +429,11 @@ type SignedBeaconBlockCapellaContainerJson struct {
Signature string `json:"signature" hex:"true"`
}
type SignedBeaconBlockDenebContainerJson struct {
Message *BeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockBellatrixContainerJson struct {
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
Signature string `json:"signature" hex:"true"`
@@ -435,6 +444,11 @@ type SignedBlindedBeaconBlockCapellaContainerJson struct {
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockDenebContainerJson struct {
Message *BlindedBeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type BeaconBlockAltairJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -459,6 +473,14 @@ type BeaconBlockCapellaJson struct {
Body *BeaconBlockBodyCapellaJson `json:"body"`
}
type BeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BeaconBlockBodyDenebJson `json:"body"`
}
type BlindedBeaconBlockBellatrixJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -475,6 +497,14 @@ type BlindedBeaconBlockCapellaJson struct {
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
}
type BlindedBeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
}
type BeaconBlockBodyAltairJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -514,6 +544,21 @@ type BeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}
type BeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}
type BlindedBeaconBlockBodyBellatrixJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -541,6 +586,21 @@ type BlindedBeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}
type BlindedBeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}
type ExecutionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -576,6 +636,25 @@ type ExecutionPayloadCapellaJson struct {
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}
type ExecutionPayloadDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
Transactions []string `json:"transactions" hex:"true"`
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}
type ExecutionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -611,6 +690,25 @@ type ExecutionPayloadHeaderCapellaJson struct {
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}
type ExecutionPayloadHeaderDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}
type SyncAggregateJson struct {
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
@@ -867,11 +965,42 @@ type BeaconStateCapellaJson struct {
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
}
type BeaconStateDenebJson struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
Slot string `json:"slot"`
Fork *ForkJson `json:"fork"`
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
BlockRoots []string `json:"block_roots" hex:"true"`
StateRoots []string `json:"state_roots" hex:"true"`
HistoricalRoots []string `json:"historical_roots" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*ValidatorJson `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes" hex:"true"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits" hex:"true"`
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
}
type BeaconStateContainerV2Json struct {
Phase0State *BeaconStateJson `json:"phase0_state"`
AltairState *BeaconStateAltairJson `json:"altair_state"`
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
DenebState *BeaconStateDenebJson `json:"deneb_state"`
}
type ForkJson struct {
@@ -1175,3 +1304,14 @@ type EventErrorJson struct {
StatusCode int `json:"status_code"`
Message string `json:"message"`
}
type BlobsSidecarJson struct {
BeaconBlockRoot string `json:"beacon_block_root" hex:"true"`
BeaconBlockSlot string `json:"beacon_block_slot"`
Blobs []string `json:"blobs" hex:"true"`
AggregatedProof string `json:"kzg_aggregated_proof" hex:"true"`
}
type BlobsSidecarResponseJson struct {
Data *BlobsSidecarJson `json:"data"`
}

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"blinded_blocks.go",
"blobs.go",
"blocks.go",
"config.go",
"log.go",

View File

@@ -1195,3 +1195,555 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.NoError(t, err)
})
}
func TestServer_GetBlindedBlockSSZ(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBeaconBlock()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
},
}
blks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blks) > 0)
require.NoError(t, err)
sszBlock, err := blks[0].MarshalSSZ()
require.NoError(t, err)
resp, err := bs.GetBlindedBlockSSZ(ctx, &ethpbv1.BlockRequest{BlockId: []byte("30")})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszBlock, resp.Data)
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBeaconBlockAltair()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
require.NoError(t, err)
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
},
}
blks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blks) > 0)
require.NoError(t, err)
sszBlock, err := blks[0].MarshalSSZ()
require.NoError(t, err)
resp, err := bs.GetBlindedBlockSSZ(ctx, &ethpbv1.BlockRequest{BlockId: []byte("30")})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszBlock, resp.Data)
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBlindedBeaconBlockBellatrix()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
require.NoError(t, err)
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
},
OptimisticModeFetcher: &mock.ChainService{},
}
blks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blks) > 0)
require.NoError(t, err)
sszBlock, err := blks[0].MarshalSSZ()
require.NoError(t, err)
resp, err := bs.GetBlindedBlockSSZ(ctx, &ethpbv1.BlockRequest{BlockId: []byte("30")})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszBlock, resp.Data)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
t.Run("Capella", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBlindedBeaconBlockCapella()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
require.NoError(t, err)
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
},
OptimisticModeFetcher: &mock.ChainService{},
}
blks, err := beaconDB.BlocksBySlot(ctx, 30)
require.Equal(t, true, len(blks) > 0)
require.NoError(t, err)
sszBlock, err := blks[0].MarshalSSZ()
require.NoError(t, err)
resp, err := bs.GetBlindedBlockSSZ(ctx, &ethpbv1.BlockRequest{BlockId: []byte("30")})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszBlock, resp.Data)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
}
func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
V1Alpha1ValidatorServer: alphaServer,
}
req := util.NewBlindedBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Capella", func(t *testing.T) {
t.Skip("This test needs Capella fork version configured properly")
// INFO: This code block can be removed once Capella
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.CapellaForkVersion)] = cfg.BellatrixForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
V1Alpha1ValidatorServer: alphaServer,
}
req := util.NewBlindedBeaconBlockCapella()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
}
func TestSubmitBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v1Block, err := migration.V1Alpha1ToV1SignedBlock(req)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Block.Block},
Signature: v1Block.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(req.Block)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Block},
Signature: req.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
transactionsRoot, err := ssz.TransactionsRoot(transactions)
require.NoError(t, err)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
V1Alpha1ValidatorServer: alphaServer,
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 5
blk.Block.ParentRoot = bsRoot[:]
blk.Block.Body.ExecutionPayload.Transactions = transactions
blindedBlk := util.NewBlindedBeaconBlockBellatrixV2()
blindedBlk.Message.Slot = 5
blindedBlk.Message.ParentRoot = bsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
util.SaveBlock(t, ctx, beaconDB, blk)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blindedBlk.Message},
Signature: blindedBlk.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
t.Run("Capella", func(t *testing.T) {
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
transactionsRoot, err := ssz.TransactionsRoot(transactions)
require.NoError(t, err)
withdrawals := []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: bytesutil.PadTo([]byte("address1"), 20),
Amount: 1,
},
{
Index: 2,
ValidatorIndex: 2,
Address: bytesutil.PadTo([]byte("address2"), 20),
Amount: 2,
},
}
withdrawalsRoot, err := ssz.WithdrawalSliceRoot(hash.CustomSHA256Hasher(), withdrawals, 16)
require.NoError(t, err)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
V1Alpha1ValidatorServer: alphaServer,
}
blk := util.NewBeaconBlockCapella()
blk.Block.Slot = 5
blk.Block.ParentRoot = bsRoot[:]
blk.Block.Body.ExecutionPayload.Transactions = transactions
blk.Block.Body.ExecutionPayload.Withdrawals = withdrawals
blindedBlk := util.NewBlindedBeaconBlockCapellaV2()
blindedBlk.Message.Slot = 5
blindedBlk.Message.ParentRoot = bsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
util.SaveBlock(t, ctx, beaconDB, blk)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: blindedBlk.Message},
Signature: blindedBlk.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
}

View File

@@ -0,0 +1,45 @@
package beacon
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)
func (bs *Server) GetBlobsSidecar(ctx context.Context, req *ethpbv1.BlobsRequest) (*ethpbv1.BlobsResponse, error) {
sblk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(sblk, err)
if err != nil {
return nil, errors.Wrap(err, "GetBlobs")
}
block := sblk.Block()
root, err := block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to htr block")
}
sidecar, err := bs.BeaconDB.BlobsSidecar(ctx, root)
if err != nil {
return nil, fmt.Errorf("failed to get blobs sidecar for block %x", root)
}
var blobs []*ethpbv1.Blob
var aggregatedProof []byte
if sidecar != nil {
aggregatedProof = sidecar.AggregatedProof
for _, b := range sidecar.Blobs {
var data []byte
// Copy the blob's bytes into the response blob.
data = append(data, b.Data...)
blobs = append(blobs, &ethpbv1.Blob{Data: data})
}
}
return &ethpbv1.BlobsResponse{
BeaconBlockRoot: root[:],
BeaconBlockSlot: uint64(block.Slot()),
Blobs: blobs,
AggregatedProof: aggregatedProof,
}, nil
}

View File

@@ -379,6 +379,14 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockDeneb(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
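GetBlockV2 and GetBlockSSZV2 extend the same cascade to Deneb: each fork-specific getter is tried in turn, a sentinel error meaning "not this fork" causes fall-through to the next getter, and any other error is returned immediately. A simplified sketch of that control flow with a stand-in sentinel and toy getters:

package main

import (
	"errors"
	"fmt"
)

// errUnsupported stands in for the unsupported-getter sentinel: the getter
// does not handle this block's fork, so the caller should try the next one.
var errUnsupported = errors.New("unsupported getter")

type getter func(blk string) (string, error)

func getPhase0(blk string) (string, error) {
	if blk != "phase0" {
		return "", errUnsupported
	}
	return "phase0 response", nil
}

func getDeneb(blk string) (string, error) {
	if blk != "deneb" {
		return "", errUnsupported
	}
	return "deneb response", nil
}

// resolve mirrors the cascade above: return the first successful result,
// propagate real errors, and fall through on the sentinel.
func resolve(blk string, getters ...getter) (string, error) {
	for _, g := range getters {
		result, err := g(blk)
		if result != "" {
			return result, nil
		}
		if !errors.Is(err, errUnsupported) {
			return "", err
		}
	}
	return "", fmt.Errorf("unknown block type %q", blk)
}

func main() {
	fmt.Println(resolve("deneb", getPhase0, getDeneb))   // deneb response <nil>
	fmt.Println(resolve("capella", getPhase0, getDeneb)) // unknown block type "capella"
}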
@@ -433,6 +441,14 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockDeneb(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
@@ -814,6 +830,76 @@ func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.SignedBeac
}, nil
}
func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}
if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
func getSSZBlockPhase0(blk interfaces.SignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
phase0Blk, err := blk.PbPhase0Block()
if err != nil {
@@ -1009,6 +1095,82 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.SignedB
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_Deneb,
ExecutionOptimistic: isOptimistic,
Data: sszData,
}, nil
}
return nil, err
}
return nil, err
}
if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_Deneb, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
if err != nil {

View File

@@ -95,6 +95,19 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
},
ExecutionOptimistic: isOptimistic,
}, nil
case version.Deneb:
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
return &ethpbv2.BeaconStateResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.BeaconStateContainer{
State: &ethpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
},
ExecutionOptimistic: isOptimistic,
}, nil
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -124,6 +137,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
ver = ethpbv2.Version_BELLATRIX
case version.Capella:
ver = ethpbv2.Version_CAPELLA
case version.Deneb:
ver = ethpbv2.Version_Deneb
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}

View File

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
v1alpha1validator "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/prysm/v1alpha1/validator"
@@ -24,6 +25,7 @@ type Server struct {
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
V1Alpha1Server *v1alpha1validator.Server
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
}

View File

@@ -6,6 +6,7 @@ go_library(
"assignments.go",
"attestations.go",
"blocks.go",
"blstoexec.go",
"committees.go",
"config.go",
"log.go",
@@ -36,6 +37,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state:go_default_library",

View File

@@ -122,7 +122,7 @@ func convertToBlockContainer(blk interfaces.SignedBeaconBlock, root [32]byte, is
}
ctr.Block = &ethpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
}
case version.Capella:
case version.Capella, version.Deneb:
if blk.IsBlinded() {
rBlk, err := blk.PbBlindedCapellaBlock()
if err != nil {

View File

@@ -0,0 +1,24 @@
package beacon
import (
"context"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// SubmitBLSToExecutionChange receives a withdrawal credential change object via
// RPC and injects it into the beacon node's operations pool.
// Submission into this pool does not guarantee inclusion in a beacon block. If the object passes validation,
// the node MUST broadcast it.
func (bs *Server) SubmitBLSToExecutionChange(
ctx context.Context,
req *ethpb.SignedBLSToExecutionChange,
) (*ethpb.BLSToExecutionChangeResponse, error) {
bs.BLSChangesPool.InsertBLSToExecChange(req)
if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast SigledBLSToExecutionChange object: %v", err)
}
return nil, nil
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
@@ -40,6 +41,7 @@ type Server struct {
Broadcaster p2p.Broadcaster
AttestationsPool attestations.Pool
SlashingsPool slashings.PoolManager
BLSChangesPool blstoexec.PoolManager
ChainStartChan chan time.Time
ReceivedAttestationsBuffer chan *ethpb.Attestation
CollectedAttestationsBuffer chan []*ethpb.Attestation

View File

@@ -44,13 +44,14 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -61,6 +62,7 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blobs:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -84,6 +86,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_protolambda_go_kzg//eth:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",

View File

@@ -100,6 +100,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
return nil
}
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
case version.Deneb:
pb, err := data.SignedBlock.Proto()
if err != nil {
return errors.Wrap(err, "could not get protobuf block")
}
phBlk, ok := pb.(*ethpb.SignedBeaconBlockDeneb)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockDeneb")
return nil
}
b.Block = &ethpb.StreamBlocksResponse_DenebBlock{DenebBlock: phBlk}
}
if err := stream.Send(b); err != nil {
@@ -149,6 +160,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
case *ethpb.SignedBeaconBlockCapella:
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
case *ethpb.SignedBeaconBlockDeneb:
b.Block = &ethpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
default:
log.Errorf("Unknown block type %T", p)
}

View File

@@ -11,18 +11,22 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/protolambda/go-kzg/eth"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v3/config/params"
blobs2 "github.com/prysmaticlabs/prysm/v3/consensus-types/blobs"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -36,8 +40,8 @@ var eth1DataNotification bool
const eth1dataTimeout = 2 * time.Second
// GetBeaconBlock is called by a proposer during its assigned slot to request a block to sign
// by passing in the slot and the signed randao reveal of the slot. Returns phase0 beacon blocks
// before the Altair fork epoch and Altair blocks post-fork epoch.
// by passing in the slot and the signed randao reveal of the slot. Returns a full block
// corresponding to the fork epoch of the requested slot.
func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBeaconBlock")
defer span.End()
@@ -125,6 +129,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
sr, err := vs.computeStateRoot(ctx, sBlk)
if err != nil {
interop.WriteBlockToDisk("proposer", sBlk, true /*failed*/)
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
}
blk.SetStateRoot(sr)
@@ -133,6 +138,9 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().DenebForkEpoch {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: pb.(*ethpb.BeaconBlockDeneb)}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}}, nil
}
@@ -263,20 +271,27 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
})
}()
// Broadcast the new block to the network.
blkPb, err := blk.Proto()
if err != nil {
return nil, errors.Wrap(err, "could not get protobuf block")
}
if err := vs.P2P.Broadcast(ctx, blkPb); err != nil {
return nil, fmt.Errorf("could not broadcast block: %v", err)
}
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(root[:]),
}).Debug("Broadcasting block")
if blk.Version() == version.Deneb {
if err := vs.proposeBlockAndBlobs(ctx, root, blk); err != nil {
return nil, errors.Wrap(err, "could not propose block and blob")
}
} else {
// Broadcast the new block to the network.
blkPb, err := blk.Proto()
if err != nil {
return nil, errors.Wrap(err, "could not get protobuf block")
}
if err := vs.P2P.Broadcast(ctx, blkPb); err != nil {
return nil, fmt.Errorf("could not broadcast block: %v", err)
}
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(root[:]),
}).Debug("Broadcasting block")
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
return nil, fmt.Errorf("could not process beacon block: %v", err)
}
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
return nil, fmt.Errorf("could not process beacon block: %v", err)
}
return &ethpb.ProposeResponse{
@@ -284,6 +299,42 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
}, nil
}
func (vs *Server) proposeBlockAndBlobs(ctx context.Context, root [32]byte, blk interfaces.SignedBeaconBlock) error {
blkPb, err := blk.PbDenebBlock()
if err != nil {
return errors.Wrap(err, "could not get protobuf block")
}
blobs, err := vs.BlobsCache.Get(blk.Block().Slot())
if err != nil {
return errors.Wrap(err, "could not get blobs from cache")
}
sc := &ethpb.BlobsSidecar{
Blobs: blobs,
BeaconBlockSlot: blk.Block().Slot(),
BeaconBlockRoot: root[:],
}
aggregatedProof, err := eth.ComputeAggregateKZGProof(blobs2.BlobsSequenceImpl(blobs))
if err != nil {
interop.WriteBadBlobsToDisk("proposer", sc)
return fmt.Errorf("failed to compute aggregated kzg proof: %v", err)
}
sc.AggregatedProof = aggregatedProof[:]
if err := vs.P2P.Broadcast(ctx, &ethpb.SignedBeaconBlockAndBlobsSidecar{
BeaconBlock: blkPb,
BlobsSidecar: sc,
}); err != nil {
return fmt.Errorf("could not broadcast block: %v", err)
}
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
return fmt.Errorf("could not process beacon block: %v", err)
}
if err := vs.BeaconDB.SaveBlobsSidecar(ctx, sc); err != nil {
return errors.Wrap(err, "could not save sidecar to DB")
}
return nil
}
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
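
The fork gating in GetBeaconBlock reduces to an ordered epoch comparison: the highest fork whose activation epoch has been reached determines which GenericBeaconBlock variant wraps the proposal. A self-contained sketch of that selection logic (fork names and epoch values are illustrative, not Prysm's config API):

```go
package main

import "fmt"

type epoch uint64

// Fork activation epochs, highest first; values are illustrative only.
var forks = []struct {
	name  string
	epoch epoch
}{
	{"deneb", 300},
	{"capella", 200},
	{"bellatrix", 100},
	{"altair", 50},
	{"phase0", 0},
}

// versionForEpoch returns the latest fork active at e, mirroring the
// ">= ForkEpoch" cascade used when choosing the block variant to return.
func versionForEpoch(e epoch) string {
	for _, f := range forks {
		if e >= f.epoch {
			return f.name
		}
	}
	return "phase0"
}

func main() {
	fmt.Println(versionForEpoch(250)) // capella
	fmt.Println(versionForEpoch(301)) // deneb
}
```
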

View File

@@ -80,7 +80,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
// filter separates attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for an attestation to be considered for proposing,
// and attestations from the second group should be deleted.
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts, error) {
validAtts := make([]*ethpb.Attestation, 0, len(a))
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
@@ -98,7 +98,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
} else {
// Exit early if there is an unknown state type.
return validAtts, invalidAtts
return validAtts, invalidAtts, errors.Errorf("unknown state type: %v", st.Version())
}
for _, att := range a {
@@ -108,7 +108,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
invalidAtts = append(invalidAtts, att)
}
return validAtts, invalidAtts
return validAtts, invalidAtts, nil
}
// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
@@ -247,7 +247,10 @@ func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.Beac
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
validAtts, invalidAtts, err := proposerAtts(atts).filter(ctx, st)
if err != nil {
return nil, err
}
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
}
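
The filter change turns a silent fallthrough on an unknown state type into an explicit error that callers must handle. A hedged sketch of that partition-with-error shape, using a toy predicate in place of Prysm's per-fork attestation processor:

```go
package main

import (
	"errors"
	"fmt"
)

// partition splits items into valid/invalid using check, and surfaces a
// configuration error instead of silently returning partial results.
func partition(items []int, check func(int) bool) (valid, invalid []int, err error) {
	if check == nil {
		return nil, nil, errors.New("unknown state type: no attestation processor configured")
	}
	for _, it := range items {
		if check(it) {
			valid = append(valid, it)
			continue
		}
		invalid = append(invalid, it)
	}
	return valid, invalid, nil
}

func main() {
	v, inv, err := partition([]int{1, 2, 3, 4}, func(n int) bool { return n%2 == 0 })
	fmt.Println(v, inv, err) // [2 4] [1 3] <nil>
}
```
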

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"math/big"
"time"
"github.com/pkg/errors"
@@ -14,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
coreBlock "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -60,10 +62,16 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.BeaconBlo
}
}
}
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.ParentRoot(), headState)
executionData, blobsBundle, err := vs.getExecutionPayload(ctx, slot, idx, blk.ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
}
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
if err := blk.Body().SetBlobKzgCommitments(blobsBundle.KzgCommitments); err != nil {
return errors.Wrap(err, "could not set blob kzg commitments")
}
vs.BlobsCache.Put(slot, blobsBundle.Blobs)
}
return blk.Body().SetExecution(executionData)
}
@@ -98,7 +106,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
return nil, errors.New("builder returned nil bid")
}
v := bytesutil.LittleEndianBytesToBigInt(bid.Message.Value)
v := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(bid.Message.Value))
if v.String() == "0" {
return nil, errors.New("builder returned header with 0 bid amount")
}
@@ -124,7 +132,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
return nil, fmt.Errorf("incorrect timestamp %d != %d", bid.Message.Header.Timestamp, uint64(t.Unix()))
}
if err := validateBuilderSignature(bid); err != nil {
if err := vs.validateBuilderSignature(bid); err != nil {
return nil, errors.Wrap(err, "could not validate builder signature")
}
@@ -133,6 +141,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
}).Info("Received header with bid")
return consensusblocks.WrappedExecutionPayloadHeader(bid.Message.Header)
}
@@ -140,7 +149,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
// bellatrix blind block. The output block will contain the full payload. The original header block
// will be returned if the block builder is not configured.
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
if err := coreBlock.BeaconBlockIsNil(b); err != nil {
return nil, err
}
@@ -248,7 +257,7 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
}
// Validates builder signature and returns an error if the signature is invalid.
func validateBuilderSignature(bid *ethpb.SignedBuilderBid) error {
func (vs *Server) validateBuilderSignature(bid *ethpb.SignedBuilderBid) error {
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,
nil, /* fork version */
nil /* genesis val root */)
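
The bid-value change replaces a helper with an explicit big-endian interpretation of the reversed bytes: builder bids encode the value little-endian, so reversing before big.Int.SetBytes yields the same integer. A standalone sketch (reverseByteOrder here is a local helper, not Prysm's bytesutil):

```go
package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a reversed copy of b (little-endian -> big-endian).
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 1000 wei encoded little-endian in a 4-byte field: 0xE8 0x03 0x00 0x00.
	bidValue := []byte{0xE8, 0x03, 0x00, 0x00}
	v := new(big.Int).SetBytes(reverseByteOrder(bidValue))
	fmt.Println(v.String())        // 1000
	fmt.Println(v.String() == "0") // false -> bid is non-zero, accept header
}
```
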

View File

@@ -30,11 +30,16 @@ func getEmptyBlock(slot types.Slot) (interfaces.SignedBeaconBlock, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
default:
case slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
default:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
}
return sBlk, err
}

View File

@@ -40,9 +40,10 @@ var (
})
)
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// This returns the execution payload of a given slot.
// The function has full awareness of pre and post merge.
// The payload is computed given the respective time of the merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, *enginev1.BlobsBundle, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
@@ -62,7 +63,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
"Please refer to our documentation for instructions")
}
default:
return nil, errors.Wrap(err, "could not get fee recipient in db")
return nil, nil, errors.Wrap(err, "could not get fee recipient in db")
}
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
@@ -73,10 +74,17 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
switch {
case err == nil:
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, pid)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
}
return payload, sc, nil
}
return payload, nil, nil
case errors.Is(err, context.DeadlineExceeded):
default:
return nil, errors.Wrap(err, "could not get cached payload from execution client")
return nil, nil, errors.Wrap(err, "could not get cached payload from execution client")
}
}
@@ -84,53 +92,61 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
var hasTerminalBlock bool
mergeComplete, err := blocks.IsMergeTransitionComplete(st)
if err != nil {
return nil, err
return nil, nil, err
}
t, err := slots.ToTime(st.GenesisTime(), slot)
if err != nil {
return nil, err
return nil, nil, err
}
if mergeComplete {
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
return nil, nil, err
}
parentHash = header.BlockHash()
} else {
if activationEpochNotReached(slot) {
return consensusblocks.WrappedExecutionPayload(emptyPayload())
p, err := emptyPayload()
if err != nil {
return nil, nil, err
}
return p, nil, nil
}
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
if err != nil {
return nil, err
return nil, nil, err
}
if !hasTerminalBlock {
return consensusblocks.WrappedExecutionPayload(emptyPayload())
p, err := emptyPayload()
if err != nil {
return nil, nil, err
}
return p, nil, nil
}
}
payloadIDCacheMiss.Inc()
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return nil, err
return nil, nil, err
}
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
finalizedRoot := bytesutil.ToBytes32(st.FinalizedCheckpoint().Root)
if finalizedRoot != [32]byte{} { // finalized root could be zeros before the first finalized block.
finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))
if err != nil {
return nil, err
return nil, nil, err
}
if err := consensusblocks.BeaconBlockIsNil(finalizedBlock); err != nil {
return nil, err
return nil, nil, err
}
switch finalizedBlock.Version() {
case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
default:
finalizedPayload, err := finalizedBlock.Block().Body().Execution()
if err != nil {
return nil, err
return nil, nil, err
}
finalizedBlockHash = finalizedPayload.BlockHash()
}
@@ -142,30 +158,34 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
FinalizedBlockHash: finalizedBlockHash,
}
p := &enginev1.PayloadAttributes{
p, err := payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: random,
SuggestedFeeRecipient: feeRecipient.Bytes(),
}
// This will change in subsequent hardforks like Capella.
pa, err := payloadattribute.New(p)
})
if err != nil {
return nil, err
return nil, nil, err
}
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, pa)
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, p)
if err != nil {
return nil, errors.Wrap(err, "could not prepare payload")
return nil, nil, errors.Wrap(err, "could not prepare payload")
}
if payloadID == nil {
return nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
return nil, nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
}
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
if err != nil {
return nil, err
return nil, nil, err
}
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, *payloadID)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
}
return payload, sc, nil
}
return payload, nil, nil
}
// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
@@ -227,8 +247,8 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
func emptyPayload() (interfaces.ExecutionData, error) {
return consensusblocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
@@ -237,5 +257,5 @@ func emptyPayload() *enginev1.ExecutionPayload {
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
}
})
}
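
getExecutionPayload now returns a third value, the blobs bundle, which is fetched only when the slot is at or past the Deneb fork and is nil otherwise. A hedged sketch of that return shape with hypothetical engine types (GetPayload/GetBlobsBundle below are stand-ins, not the real engine API):

```go
package main

import (
	"context"
	"fmt"
)

type payload struct{ blockHash string }
type blobsBundle struct{ kzgCommitments [][]byte }

// engine is a hypothetical stand-in for the execution-engine caller.
type engine interface {
	GetPayload(ctx context.Context, id [8]byte) (*payload, error)
	GetBlobsBundle(ctx context.Context, id [8]byte) (*blobsBundle, error)
}

// getExecutionData mirrors the new return shape: the bundle is only
// requested for post-Deneb slots and stays nil otherwise.
func getExecutionData(ctx context.Context, e engine, id [8]byte, epoch, denebEpoch uint64) (*payload, *blobsBundle, error) {
	p, err := e.GetPayload(ctx, id)
	if err != nil {
		return nil, nil, err
	}
	if epoch >= denebEpoch {
		b, err := e.GetBlobsBundle(ctx, id)
		if err != nil {
			return nil, nil, fmt.Errorf("could not get blobs bundle: %w", err)
		}
		return p, b, nil
	}
	return p, nil, nil
}

type fakeEngine struct{}

func (fakeEngine) GetPayload(context.Context, [8]byte) (*payload, error) {
	return &payload{blockHash: "0xabc"}, nil
}
func (fakeEngine) GetBlobsBundle(context.Context, [8]byte) (*blobsBundle, error) {
	return &blobsBundle{kzgCommitments: [][]byte{{0x01}}}, nil
}

func main() {
	p, b, err := getExecutionData(context.Background(), fakeEngine{}, [8]byte{}, 300, 200)
	fmt.Println(p.blockHash, len(b.kzgCommitments), err) // 0xabc 1 <nil>
}
```
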

View File

@@ -63,6 +63,7 @@ type Server struct {
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
BlockReceiver blockchain.BlockReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
@@ -73,7 +74,7 @@ type Server struct {
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
BlobsCache *cache.BlobsCache
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current

View File

@@ -98,6 +98,7 @@ type Config struct {
AttestationsPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingsPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
SlashingChecker slasherservice.SlashingChecker
SyncCommitteeObjectPool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
@@ -218,12 +219,13 @@ func (s *Service) Start() {
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
BLSChangesPool: s.cfg.BLSChangesPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
BlobsCache: cache.NewBlobsCache(),
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
@@ -243,6 +245,7 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
},
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
BLSChangesPool: s.cfg.BLSChangesPool,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
}
@@ -295,6 +298,7 @@ func (s *Service) Start() {
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
BLSChangesPool: s.cfg.BLSChangesPool,
}
beaconChainServerV1 := &beacon.Server{
CanonicalHistory: ch,

View File

@@ -48,6 +48,7 @@ type BeaconState struct {
nextSyncCommittee *ethpb.SyncCommittee
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
nextWithdrawalIndex uint64
nextWithdrawalValidatorIndex eth2types.ValidatorIndex

View File

@@ -48,6 +48,7 @@ type BeaconState struct {
nextSyncCommittee *ethpb.SyncCommittee
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
nextWithdrawalIndex uint64
nextWithdrawalValidatorIndex eth2types.ValidatorIndex

View File

@@ -20,7 +20,12 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (interfaces.ExecutionData,
if b.version == version.Bellatrix {
return blocks.WrappedExecutionPayloadHeader(b.latestExecutionPayloadHeaderVal())
}
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal())
if b.version == version.Capella {
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal())
}
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal())
}
// latestExecutionPayloadHeaderVal of the beacon state.
@@ -34,3 +39,7 @@ func (b *BeaconState) latestExecutionPayloadHeaderVal() *enginev1.ExecutionPaylo
func (b *BeaconState) latestExecutionPayloadHeaderCapellaVal() *enginev1.ExecutionPayloadHeaderCapella {
return ethpb.CopyExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapella)
}
func (b *BeaconState) latestExecutionPayloadHeaderDenebVal() *enginev1.ExecutionPayloadHeaderDeneb {
return ethpb.CopyExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDeneb)
}

View File

@@ -128,6 +128,37 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummaries,
}
case version.Deneb:
return &ethpb.BeaconStateDeneb{
GenesisTime: b.genesisTime,
GenesisValidatorsRoot: gvrCopy[:],
Slot: b.slot,
Fork: b.fork,
LatestBlockHeader: b.latestBlockHeader,
BlockRoots: b.blockRoots.Slice(),
StateRoots: b.stateRoots.Slice(),
HistoricalRoots: b.historicalRoots.Slice(),
Eth1Data: b.eth1Data,
Eth1DataVotes: b.eth1DataVotes,
Eth1DepositIndex: b.eth1DepositIndex,
Validators: b.validators,
Balances: b.balances,
RandaoMixes: b.randaoMixes.Slice(),
Slashings: b.slashings,
PreviousEpochParticipation: b.previousEpochParticipation,
CurrentEpochParticipation: b.currentEpochParticipation,
JustificationBits: b.justificationBits,
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
FinalizedCheckpoint: b.finalizedCheckpoint,
InactivityScores: b.inactivityScores,
CurrentSyncCommittee: b.currentSyncCommittee,
NextSyncCommittee: b.nextSyncCommittee,
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDeneb,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummaries,
}
default:
return nil
}
@@ -255,6 +286,37 @@ func (b *BeaconState) ToProto() interface{} {
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummariesVal(),
}
case version.Deneb:
return &ethpb.BeaconStateDeneb{
GenesisTime: b.genesisTime,
GenesisValidatorsRoot: gvrCopy[:],
Slot: b.slot,
Fork: b.forkVal(),
LatestBlockHeader: b.latestBlockHeaderVal(),
BlockRoots: b.blockRoots.Slice(),
StateRoots: b.stateRoots.Slice(),
HistoricalRoots: b.historicalRoots.Slice(),
Eth1Data: b.eth1DataVal(),
Eth1DataVotes: b.eth1DataVotesVal(),
Eth1DepositIndex: b.eth1DepositIndex,
Validators: b.validatorsVal(),
Balances: b.balancesVal(),
RandaoMixes: b.randaoMixes.Slice(),
Slashings: b.slashingsVal(),
PreviousEpochParticipation: b.previousEpochParticipationVal(),
CurrentEpochParticipation: b.currentEpochParticipationVal(),
JustificationBits: b.justificationBitsVal(),
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpointVal(),
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpointVal(),
FinalizedCheckpoint: b.finalizedCheckpointVal(),
InactivityScores: b.inactivityScoresVal(),
CurrentSyncCommittee: b.currentSyncCommitteeVal(),
NextSyncCommittee: b.nextSyncCommitteeVal(),
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDenebVal(),
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummariesVal(),
}
default:
return nil
}
@@ -338,3 +400,11 @@ func ProtobufBeaconStateCapella(s interface{}) (*ethpb.BeaconStateCapella, error
}
return pbState, nil
}
func ProtobufBeaconStateDeneb(s interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, ok := s.(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateDeneb")
}
return pbState, nil
}
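
The Deneb variant repeats the assert-and-wrap shape used for the earlier forks. A hedged generic sketch (Go 1.18+) of a helper the per-fork functions could share; the state types below are toy stand-ins:

```go
package main

import "fmt"

type beaconStateCapella struct{ slot uint64 }
type beaconStateDeneb struct{ slot uint64 }

// asState asserts an untyped input to the requested concrete state type and
// returns a descriptive error naming the expected type instead of panicking.
func asState[T any](s interface{}) (T, error) {
	v, ok := s.(T)
	if !ok {
		var zero T
		return zero, fmt.Errorf("input is not type %T", zero)
	}
	return v, nil
}

func main() {
	var st interface{} = &beaconStateDeneb{slot: 7}
	d, err := asState[*beaconStateDeneb](st)
	fmt.Println(d.slot, err) // 7 <nil>
	_, err = asState[*beaconStateCapella](st)
	fmt.Println(err) // input is not type *main.beaconStateCapella
}
```
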

View File

@@ -1,6 +1,7 @@
package state_native
import (
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -49,7 +50,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, error) {
b.lock.RLock()
defer b.lock.RUnlock()
withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
withdrawals := make([]*enginev1.Withdrawal, 0, fieldparams.MaxWithdrawalsPerPayload)
validatorIndex := b.nextWithdrawalValidatorIndex
withdrawalIndex := b.nextWithdrawalIndex
epoch := slots.ToEpoch(b.slot)
@@ -75,7 +76,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, error) {
})
withdrawalIndex++
}
if uint64(len(withdrawals)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
if uint64(len(withdrawals)) == fieldparams.MaxWithdrawalsPerPayload {
break
}
validatorIndex += 1

View File

@@ -26,7 +26,7 @@ func TestNextWithdrawalIndex(t *testing.T) {
})
}
func TestLastWithdrawalValidatorIndex(t *testing.T) {
func TestNextWithdrawalValidatorIndex(t *testing.T) {
t.Run("ok", func(t *testing.T) {
s := BeaconState{version: version.Capella, nextWithdrawalValidatorIndex: 123}
i, err := s.NextWithdrawalValidatorIndex()

View File

@@ -35,6 +35,8 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateBellatrixFieldCount)
case version.Capella:
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
case version.Deneb:
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
}
// Genesis time root.
@@ -264,5 +266,31 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
fieldRoots[nativetypes.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
}
if state.version == version.Deneb {
// Execution payload root.
executionPayloadRoot, err := state.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
if err != nil {
return nil, err
}
fieldRoots[nativetypes.LatestExecutionPayloadHeaderDeneb.RealPosition()] = executionPayloadRoot[:]
// Next withdrawal index root.
nextWithdrawalIndexRoot := make([]byte, 32)
binary.LittleEndian.PutUint64(nextWithdrawalIndexRoot, state.nextWithdrawalIndex)
fieldRoots[nativetypes.NextWithdrawalIndex.RealPosition()] = nextWithdrawalIndexRoot
// Next partial withdrawal validator index root.
nextWithdrawalValidatorIndexRoot := make([]byte, 32)
binary.LittleEndian.PutUint64(nextWithdrawalValidatorIndexRoot, uint64(state.nextWithdrawalValidatorIndex))
fieldRoots[nativetypes.NextWithdrawalValidatorIndex.RealPosition()] = nextWithdrawalValidatorIndexRoot
// Historical summary root.
historicalSummaryRoot, err := stateutil.HistoricalSummariesRoot(state.historicalSummaries)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical summary merkleization")
}
fieldRoots[nativetypes.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
}
return fieldRoots, nil
}
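
The per-field roots for the withdrawal counters are plain uint64 values packed into 32-byte leaves, little-endian in the first 8 bytes with zero padding. A small self-contained sketch of that packing (the helper name is illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// uint64Root packs a uint64 into a 32-byte SSZ leaf by writing it
// little-endian into the first 8 bytes and zero-padding the rest, which is
// how nextWithdrawalIndex / nextWithdrawalValidatorIndex are merkleized.
func uint64Root(v uint64) [32]byte {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], v)
	return root
}

func main() {
	r := uint64Root(42)
	fmt.Printf("%x\n", r[:8]) // 2a00000000000000
}
```
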

View File

@@ -36,6 +36,14 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
b.latestExecutionPayloadHeaderCapella = latest
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeaderCapella)
return nil
case *enginev1.ExecutionPayloadDeneb:
latest, err := consensusblocks.PayloadToHeaderDeneb(val)
if err != nil {
return errors.Wrap(err, "could not convert payload to header")
}
b.latestExecutionPayloadHeaderDeneb = latest
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeaderDeneb)
return nil
case *enginev1.ExecutionPayloadHeader:
b.latestExecutionPayloadHeader = header
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeader)
@@ -44,6 +52,10 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
b.latestExecutionPayloadHeaderCapella = header
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeaderCapella)
return nil
case *enginev1.ExecutionPayloadHeaderDeneb:
b.latestExecutionPayloadHeaderDeneb = header
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeaderDeneb)
return nil
default:
return errors.New("value must be an execution payload header")
}

View File

@@ -20,7 +20,7 @@ func (b *BeaconState) SetNextWithdrawalIndex(i uint64) error {
return nil
}
// SetLastWithdrawalValidatorIndex sets the index of the validator which is
// SetNextWithdrawalValidatorIndex sets the index of the validator which is
// next in line for a partial withdrawal.
func (b *BeaconState) SetNextWithdrawalValidatorIndex(i types.ValidatorIndex) error {
if b.version < version.Capella {

View File

@@ -20,7 +20,7 @@ func TestSetNextWithdrawalIndex(t *testing.T) {
require.Equal(t, true, s.dirtyFields[nativetypes.NextWithdrawalIndex])
}
func TestSetLastWithdrawalValidatorIndex(t *testing.T) {
func TestSetNextWithdrawalValidatorIndex(t *testing.T) {
s := BeaconState{
version: version.Capella,
nextWithdrawalValidatorIndex: 3,

View File

@@ -7,7 +7,7 @@ import (
func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
switch b.version {
case version.Bellatrix, version.Capella:
case version.Bellatrix, version.Capella, version.Deneb:
return params.BeaconConfig().ProportionalSlashingMultiplierBellatrix, nil
case version.Altair:
return params.BeaconConfig().ProportionalSlashingMultiplierAltair, nil
@@ -19,7 +19,7 @@ func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
func (b *BeaconState) InactivityPenaltyQuotient() (uint64, error) {
switch b.version {
case version.Bellatrix, version.Capella:
case version.Bellatrix, version.Capella, version.Deneb:
return params.BeaconConfig().InactivityPenaltyQuotientBellatrix, nil
case version.Altair:
return params.BeaconConfig().InactivityPenaltyQuotientAltair, nil

View File

@@ -85,10 +85,19 @@ var capellaFields = append(
nativetypes.HistoricalSummaries,
)
var denebFields = append(
altairFields,
nativetypes.LatestExecutionPayloadHeaderDeneb,
nativetypes.NextWithdrawalIndex,
nativetypes.NextWithdrawalValidatorIndex,
nativetypes.HistoricalSummaries,
)
const (
phase0SharedFieldRefCount = 10
altairSharedFieldRefCount = 11
bellatrixSharedFieldRefCount = 12
denebSharedFieldRefCount = 14
capellaSharedFieldRefCount = 14
)
@@ -112,6 +121,11 @@ func InitializeFromProtoCapella(st *ethpb.BeaconStateCapella) (state.BeaconState
return InitializeFromProtoUnsafeCapella(proto.Clone(st).(*ethpb.BeaconStateCapella))
}
// InitializeFromProtoDeneb the beacon state from a protobuf representation.
func InitializeFromProtoDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
return InitializeFromProtoUnsafeDeneb(proto.Clone(st).(*ethpb.BeaconStateDeneb))
}
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
// and sets them as fields of the BeaconState type.
func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState, error) {
@@ -476,6 +490,102 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
return b, nil
}
// InitializeFromProtoUnsafeDeneb directly uses the beacon state protobuf fields
// and sets them as fields of the BeaconState type.
func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
if st == nil {
return nil, errors.New("received nil state")
}
var bRoots customtypes.BlockRoots
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
var sRoots customtypes.StateRoots
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
}
hRoots := customtypes.HistoricalRoots(make([][32]byte, len(st.HistoricalRoots)))
for i, r := range st.HistoricalRoots {
hRoots[i] = bytesutil.ToBytes32(r)
}
var mixes customtypes.RandaoMixes
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
fieldCount := params.BeaconConfig().BeaconStateCapellaFieldCount
b := &BeaconState{
version: version.Deneb,
genesisTime: st.GenesisTime,
genesisValidatorsRoot: bytesutil.ToBytes32(st.GenesisValidatorsRoot),
slot: st.Slot,
fork: st.Fork,
latestBlockHeader: st.LatestBlockHeader,
blockRoots: &bRoots,
stateRoots: &sRoots,
historicalRoots: hRoots,
eth1Data: st.Eth1Data,
eth1DataVotes: st.Eth1DataVotes,
eth1DepositIndex: st.Eth1DepositIndex,
validators: st.Validators,
balances: st.Balances,
randaoMixes: &mixes,
slashings: st.Slashings,
previousEpochParticipation: st.PreviousEpochParticipation,
currentEpochParticipation: st.CurrentEpochParticipation,
justificationBits: st.JustificationBits,
previousJustifiedCheckpoint: st.PreviousJustifiedCheckpoint,
currentJustifiedCheckpoint: st.CurrentJustifiedCheckpoint,
finalizedCheckpoint: st.FinalizedCheckpoint,
inactivityScores: st.InactivityScores,
currentSyncCommittee: st.CurrentSyncCommittee,
nextSyncCommittee: st.NextSyncCommittee,
latestExecutionPayloadHeaderDeneb: st.LatestExecutionPayloadHeader,
nextWithdrawalIndex: st.NextWithdrawalIndex,
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
historicalSummaries: st.HistoricalSummaries,
dirtyFields: make(map[nativetypes.FieldIndex]bool, fieldCount),
dirtyIndices: make(map[nativetypes.FieldIndex][]uint64, fieldCount),
stateFieldLeaves: make(map[nativetypes.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
sharedFieldReferences: make(map[nativetypes.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount),
rebuildTrie: make(map[nativetypes.FieldIndex]bool, fieldCount),
valMapHandler: stateutil.NewValMapHandler(st.Validators),
}
for _, f := range denebFields {
b.dirtyFields[f] = true
b.rebuildTrie[f] = true
b.dirtyIndices[f] = []uint64{}
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
if err != nil {
return nil, err
}
b.stateFieldLeaves[f] = trie
}
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[nativetypes.BlockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.StateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.HistoricalRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.Eth1DataVotes] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.Validators] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.Balances] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.RandaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.Slashings] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.PreviousEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.CurrentEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.InactivityScores] = stateutil.NewRef(1)
b.sharedFieldReferences[nativetypes.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1) // New in Deneb.
b.sharedFieldReferences[nativetypes.HistoricalSummaries] = stateutil.NewRef(1) // New in Capella.
state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(b, finalizerCleanup)
return b, nil
}
// Copy returns a deep copy of the beacon state.
func (b *BeaconState) Copy() state.BeaconState {
b.lock.RLock()
@@ -491,6 +601,8 @@ func (b *BeaconState) Copy() state.BeaconState {
fieldCount = params.BeaconConfig().BeaconStateBellatrixFieldCount
case version.Capella:
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
case version.Deneb:
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
}
dst := &BeaconState{
@@ -534,6 +646,7 @@ func (b *BeaconState) Copy() state.BeaconState {
nextSyncCommittee: b.nextSyncCommitteeVal(),
latestExecutionPayloadHeader: b.latestExecutionPayloadHeaderVal(),
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapellaVal(),
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDenebVal(),
dirtyFields: make(map[nativetypes.FieldIndex]bool, fieldCount),
dirtyIndices: make(map[nativetypes.FieldIndex][]uint64, fieldCount),
@@ -553,6 +666,8 @@ func (b *BeaconState) Copy() state.BeaconState {
dst.sharedFieldReferences = make(map[nativetypes.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
case version.Capella:
dst.sharedFieldReferences = make(map[nativetypes.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
case version.Deneb:
dst.sharedFieldReferences = make(map[nativetypes.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
}
for field, ref := range b.sharedFieldReferences {
@@ -642,6 +757,8 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
b.dirtyFields = make(map[nativetypes.FieldIndex]bool, params.BeaconConfig().BeaconStateBellatrixFieldCount)
case version.Capella:
b.dirtyFields = make(map[nativetypes.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
case version.Deneb:
b.dirtyFields = make(map[nativetypes.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
}
return nil
@@ -833,6 +950,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field nativetypes.FieldI
return b.latestExecutionPayloadHeader.HashTreeRoot()
case nativetypes.LatestExecutionPayloadHeaderCapella:
return b.latestExecutionPayloadHeaderCapella.HashTreeRoot()
case nativetypes.LatestExecutionPayloadHeaderDeneb:
return b.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
case nativetypes.NextWithdrawalIndex:
return ssz.Uint64Root(b.nextWithdrawalIndex), nil
case nativetypes.NextWithdrawalValidatorIndex:

View File

@@ -130,7 +130,7 @@ func (f FieldIndex) RealPosition() int {
return 22
case NextSyncCommittee:
return 23
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella:
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella, LatestExecutionPayloadHeaderDeneb:
return 24
case NextWithdrawalIndex:
return 25
@@ -193,6 +193,7 @@ const (
NextSyncCommittee
LatestExecutionPayloadHeader
LatestExecutionPayloadHeaderCapella
LatestExecutionPayloadHeaderDeneb
NextWithdrawalIndex
NextWithdrawalValidatorIndex
HistoricalSummaries

View File

@@ -205,7 +205,7 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot types
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
case version.Altair, version.Bellatrix, version.Capella:
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
state, err = altair.ProcessEpoch(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -243,6 +243,14 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot types
return nil, err
}
}
if prysmtime.CanUpgradeToDeneb(state.Slot()) {
state, err = capella.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
}
return state, nil

View File

@@ -94,6 +94,8 @@ func (f FieldIndex) String(stateVersion int) string {
return "latestExecutionPayloadHeader"
case LatestExecutionPayloadHeaderCapella:
return "latestExecutionPayloadHeaderCapella"
case LatestExecutionPayloadHeaderDeneb:
return "latestExecutionPayloadHeaderDeneb"
case NextWithdrawalIndex:
return "nextWithdrawalIndex"
case NextWithdrawalValidatorIndex:
@@ -160,6 +162,7 @@ const (
LatestExecutionPayloadHeader
// State fields added in Capella
LatestExecutionPayloadHeaderCapella
LatestExecutionPayloadHeaderDeneb
NextWithdrawalIndex
NextWithdrawalValidatorIndex
)

View File

@@ -19,8 +19,10 @@ go_library(
"pending_blocks_queue.go",
"rate_limiter.go",
"rpc.go",
"rpc_beacon_blocks_and_sidecars_by_root.go",
"rpc_beacon_blocks_by_range.go",
"rpc_beacon_blocks_by_root.go",
"rpc_blobs_sidecars_by_range.go",
"rpc_chunked_response.go",
"rpc_goodbye.go",
"rpc_metadata.go",
@@ -32,6 +34,7 @@ go_library(
"subscriber_beacon_aggregate_proof.go",
"subscriber_beacon_attestation.go",
"subscriber_beacon_blocks.go",
"subscriber_block_and_blobs_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
@@ -42,6 +45,7 @@ go_library(
"validate_attester_slashing.go",
"validate_beacon_attestation.go",
"validate_beacon_blocks.go",
"validate_beacon_blocks_and_blobs.go",
"validate_bls_to_execution_change.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
@@ -88,6 +92,7 @@ go_library(
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blobs:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -95,11 +100,13 @@ go_library(
"//container/leaky-bucket:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//monitoring/tracing:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -109,6 +116,7 @@ go_library(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
@@ -120,6 +128,8 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_protolambda_go_kzg//bls:go_default_library",
"@com_github_protolambda_go_kzg//eth:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -52,25 +52,33 @@ func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher)
// retrieve expected context depending on rpc topic schema version.
func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
_, _, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
_, message, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
if err != nil {
return nil, err
}
var provideCtx bool
switch version {
case p2p.SchemaVersionV1:
// Return empty context for a v1 method.
return []byte{}, nil
// If the topic is missing from the pre-Altair v1 mapping, assume it is a new topic introduced after Altair and provide a context.
provideCtx = !p2p.PreAltairV1SchemaMapping[message]
case p2p.SchemaVersionV2:
currFork := chain.CurrentFork()
genRoot := chain.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:])
if err != nil {
return nil, err
}
return digest[:], nil
provideCtx = true
default:
return nil, errors.New("invalid version of %s registered for topic: %s")
}
if !provideCtx {
return []byte{}, nil
}
currFork := chain.CurrentFork()
genRoot := chain.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:])
if err != nil {
return nil, err
}
return digest[:], nil
}
// Minimal interface for a stream with a protocol.
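
The reworked rpcContext keeps the v1 fast path only for topics that predate Altair; every other topic, including the new blobs topics, is prefixed with the current fork digest. A hedged sketch of that decision with toy data (the mapping and the sha256 "digest" are stand-ins, not the spec's compute_fork_digest or Prysm's PreAltairV1SchemaMapping):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// preAltairV1Topics is a stand-in for the pre-Altair v1 schema mapping:
// these legacy topics carry no context bytes.
var preAltairV1Topics = map[string]bool{
	"goodbye":  true,
	"metadata": true,
	"status":   true,
}

// contextBytes mirrors the decision in rpcContext: empty context for legacy
// v1 topics, otherwise a fork digest derived from fork version + genesis root.
func contextBytes(schemaVersion, message string, forkVersion, genesisRoot []byte) ([]byte, error) {
	var provideCtx bool
	switch schemaVersion {
	case "/1":
		provideCtx = !preAltairV1Topics[message]
	case "/2":
		provideCtx = true
	default:
		return nil, fmt.Errorf("invalid schema version %q for topic %q", schemaVersion, message)
	}
	if !provideCtx {
		return []byte{}, nil
	}
	digest := sha256.Sum256(append(append([]byte{}, forkVersion...), genesisRoot...))
	return digest[:4], nil
}

func main() {
	ctx, _ := contextBytes("/1", "goodbye", []byte{4, 0, 0, 0}, make([]byte, 32))
	fmt.Println(len(ctx)) // 0: legacy v1 topic, no context bytes
	ctx, _ = contextBytes("/1", "blobs_sidecars_by_range", []byte{4, 0, 0, 0}, make([]byte, 32))
	fmt.Println(len(ctx)) // 4: newer topic, fork digest prepended
}
```
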

View File

@@ -66,6 +66,9 @@ func (s *Service) registerForUpcomingFork(currEpoch types.Epoch) error {
if nextEpoch == params.BeaconConfig().AltairForkEpoch {
s.registerRPCHandlersAltair()
}
if nextEpoch == params.BeaconConfig().DenebForkEpoch {
s.registerRPCHandlersDeneb()
}
}
return nil
}

View File

@@ -29,6 +29,7 @@ go_library(
"//beacon-chain/sync:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blobs:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",

View File

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/crypto/rand"
"github.com/prysmaticlabs/prysm/v3/math"
p2ppb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -108,6 +109,7 @@ type fetchRequestResponse struct {
start types.Slot
count uint64
blocks []interfaces.SignedBeaconBlock
blobs []*p2ppb.BlobsSidecar
err error
}
@@ -249,6 +251,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start types.Slot, cou
start: start,
count: count,
blocks: []interfaces.SignedBeaconBlock{},
blobs: []*p2ppb.BlobsSidecar{},
err: nil,
}
@@ -274,6 +277,19 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start types.Slot, cou
}
response.blocks, response.pid, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
var blobStart types.Slot
var blobCount uint64
for i := types.Slot(0); i < types.Slot(count); i++ {
if slots.WithinDataAvailabilityBound(uint64(f.chain.GenesisTime().Unix()), slots.ToEpoch(start+i)) {
blobStart = start + i
blobCount = count - uint64(i)
break
}
}
if slots.WithinDataAvailabilityBound(uint64(f.chain.GenesisTime().Unix()), slots.ToEpoch(blobStart)) {
response.blobs, response.pid, response.err = f.fetchBlobsFromPeer(ctx, blobStart, blobCount, peers)
}
return response
}
@@ -304,6 +320,32 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
return nil, "", errNoPeersAvailable
}
// fetchBlobsFromPeer fetches blobs from a single randomly selected peer.
func (f *blocksFetcher) fetchBlobsFromPeer(
ctx context.Context,
start types.Slot, count uint64,
peers []peer.ID,
) ([]*p2ppb.BlobsSidecar, peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
defer span.End()
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
req := &p2ppb.BlobsSidecarsByRangeRequest{
StartSlot: start,
Count: count,
}
for i := 0; i < len(peers); i++ {
blobs, err := f.requestBlobs(ctx, req, peers[i])
if err == nil {
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(peers[i])
return blobs, peers[i], err
} else {
log.WithError(err).Debug("Could not request blobs by range")
}
}
return nil, "", errNoPeersAvailable
}
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
func (f *blocksFetcher) requestBlocks(
ctx context.Context,
@@ -334,6 +376,30 @@ func (f *blocksFetcher) requestBlocks(
return prysmsync.SendBeaconBlocksByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
}
func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobsSidecarsByRangeRequest, pid peer.ID) ([]*p2ppb.BlobsSidecar, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
l := f.peerLock(pid)
l.Lock()
log.WithFields(logrus.Fields{
"peer": pid,
"start": req.StartSlot,
"count": req.Count,
"capacity": f.rateLimiter.Remaining(pid.String()),
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
}).Debug("Requesting blobs")
if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {
if err := f.waitForBandwidth(pid, req.Count); err != nil {
l.Unlock()
return nil, err
}
}
f.rateLimiter.Add(pid.String(), int64(req.Count))
l.Unlock()
return prysmsync.SendBlobsSidecarsByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
}
// requestBlocksByRoot is a wrapper for handling BeaconBlockByRootsReq requests/streams.
func (f *blocksFetcher) requestBlocksByRoot(
ctx context.Context,
@@ -363,6 +429,34 @@ func (f *blocksFetcher) requestBlocksByRoot(
return prysmsync.SendBeaconBlocksByRootRequest(ctx, f.chain, f.p2p, pid, req, nil)
}
func (f *blocksFetcher) requestBlockAndSidecarByRoot(
ctx context.Context,
req *p2pTypes.BeaconBlockByRootsReq,
pid peer.ID,
) ([]*p2ppb.SignedBeaconBlockAndBlobsSidecar, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
l := f.peerLock(pid)
l.Lock()
log.WithFields(logrus.Fields{
"peer": pid,
"numRoots": len(*req),
"capacity": f.rateLimiter.Remaining(pid.String()),
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
}).Debug("Requesting blocks (by roots)")
if f.rateLimiter.Remaining(pid.String()) < int64(len(*req)) {
if err := f.waitForBandwidth(pid, uint64(len(*req))); err != nil {
l.Unlock()
return nil, err
}
}
f.rateLimiter.Add(pid.String(), int64(len(*req)))
l.Unlock()
return prysmsync.SendBlocksAndSidecarsByRootRequest(ctx, f.chain, f.p2p, pid, req, nil)
}
// waitForBandwidth blocks up until peer's bandwidth is restored.
func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
log.WithField("peer", pid).Debug("Slowing down for rate limit")

View File

@@ -24,6 +24,7 @@ import (
type forkData struct {
peer peer.ID
blocks []interfaces.SignedBeaconBlock
blobs []*p2ppb.BlobsSidecar
}
// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.

View File

@@ -11,6 +11,7 @@ import (
beaconsync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)
@@ -90,6 +91,7 @@ type blocksQueue struct {
type blocksQueueFetchedData struct {
pid peer.ID
blocks []interfaces.SignedBeaconBlock
blobs []*eth.BlobsSidecar
}
// newBlocksQueue creates initialized priority queue.
@@ -333,6 +335,7 @@ func (q *blocksQueue) onDataReceivedEvent(ctx context.Context) eventHandlerFn {
}
m.pid = response.pid
m.blocks = response.blocks
m.blobs = response.blobs
return stateDataParsed, nil
}
}
@@ -355,6 +358,7 @@ func (q *blocksQueue) onReadyToSendEvent(ctx context.Context) eventHandlerFn {
data := &blocksQueueFetchedData{
pid: m.pid,
blocks: m.blocks,
blobs: m.blobs,
}
select {
case <-ctx.Done():

View File

@@ -29,6 +29,7 @@ func (q *blocksQueue) resetFromFork(fork *forkData) error {
fsm := q.smm.addStateMachine(firstBlock.Slot())
fsm.pid = fork.peer
fsm.blocks = fork.blocks
fsm.blobs = fork.blobs
fsm.state = stateDataParsed
// The rest of machines are in skipped state.

View File

@@ -9,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
@@ -47,6 +48,7 @@ type stateMachine struct {
state stateID
pid peer.ID
blocks []interfaces.SignedBeaconBlock
blobs []*eth.BlobsSidecar
updated time.Time
}
@@ -79,6 +81,7 @@ func (smm *stateMachineManager) addStateMachine(startSlot types.Slot) *stateMach
start: startSlot,
state: stateNew,
blocks: []interfaces.SignedBeaconBlock{},
blobs: []*eth.BlobsSidecar{},
updated: prysmTime.Now(),
}
smm.recalculateMachineAttribs()
@@ -91,6 +94,7 @@ func (smm *stateMachineManager) removeStateMachine(startSlot types.Slot) error {
return fmt.Errorf("state for machine %v is not found", startSlot)
}
smm.machines[startSlot].blocks = nil
smm.machines[startSlot].blobs = nil
delete(smm.machines, startSlot)
smm.recalculateMachineAttribs()
return nil

View File

@@ -10,8 +10,10 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
blobs2 "github.com/prysmaticlabs/prysm/v3/consensus-types/blobs"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)
@@ -81,7 +83,15 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
}
for data := range queue.fetchedData {
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
blobsMap := make(map[types.Slot]*eth.BlobsSidecar)
for _, blob := range data.blobs {
blobsMap[blob.BeaconBlockSlot] = blob
}
// If blobs are available, verify that the blobs and blocks are consistent.
// We can't import a block if there's no associated blob within the DA bound.
// The blobs have to pass the aggregated proof check.
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data, blobsMap)
}
log.WithFields(logrus.Fields{
@@ -109,7 +119,12 @@ func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time
return err
}
for data := range queue.fetchedData {
s.processFetchedDataRegSync(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
blobsMap := make(map[types.Slot]*eth.BlobsSidecar)
for _, blob := range data.blobs {
blobsMap[blob.BeaconBlockSlot] = blob
}
s.processFetchedDataRegSync(ctx, genesis, s.cfg.Chain.HeadSlot(), data, blobsMap)
}
log.WithFields(logrus.Fields{
"syncedSlot": s.cfg.Chain.HeadSlot(),
@@ -124,24 +139,45 @@ func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time
// processFetchedData processes data received from queue.
func (s *Service) processFetchedData(
ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData) {
ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData, blobs map[types.Slot]*eth.BlobsSidecar) {
defer s.updatePeerScorerStats(data.pid, startSlot)
// Use Batch Block Verify to process and verify batches directly.
if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch); err != nil {
if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch, blobs); err != nil {
log.WithError(err).Warn("Skip processing batched blocks")
}
}
// processFetchedData processes data received from queue.
func (s *Service) processFetchedDataRegSync(
ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData) {
ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData, blobs map[types.Slot]*eth.BlobsSidecar) {
defer s.updatePeerScorerStats(data.pid, startSlot)
blockReceiver := s.cfg.Chain.ReceiveBlock
invalidBlocks := 0
blksWithoutParentCount := 0
for _, blk := range data.blocks {
blkSlot := blk.Block().Slot()
if slots.WithinDataAvailabilityBound(uint64(s.cfg.Chain.GenesisTime().Unix()), slots.ToEpoch(blkSlot)) {
kzgs, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get kzg commitments")
continue
}
if len(kzgs) > 0 {
blob, ok := blobs[blkSlot]
if !ok {
log.Errorf("No blob found for block at slot %d", blkSlot)
continue
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
log.WithError(err).Error("Failed to get block root")
continue
}
if err := blobs2.ValidateBlobsSidecar(blkSlot, blkRoot, kzgs, blob); err != nil {
log.WithError(err).Error("Failed to validate blobs sidecar")
continue
}
}
}
if err := s.processBlock(ctx, genesis, blk, blockReceiver); err != nil {
switch {
case errors.Is(err, errBlockAlreadyProcessed):
@@ -245,7 +281,7 @@ func (s *Service) processBlock(
}
func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
blks []interfaces.SignedBeaconBlock, bFunc batchBlockReceiverFn) error {
blks []interfaces.SignedBeaconBlock, bFunc batchBlockReceiverFn, blobs map[types.Slot]*eth.BlobsSidecar) error {
if len(blks) == 0 {
return errors.New("0 blocks provided into method")
}
@@ -284,6 +320,25 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
return err
}
blockRoots[i] = blkRoot
blkSlot := b.Block().Slot()
if slots.WithinDataAvailabilityBound(uint64(s.cfg.Chain.GenesisTime().Unix()), slots.ToEpoch(blkSlot)) {
kzgs, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
return fmt.Errorf("failed to get blob kzg commitments: %w", err)
}
if len(kzgs) > 0 {
blob, ok := blobs[b.Block().Slot()]
if !ok {
return fmt.Errorf("missing sidecar blob for slot %d", b.Block().Slot())
}
if err := blobs2.ValidateBlobsSidecar(blkSlot, blkRoot, kzgs, blob); err != nil {
return err
}
if err := s.cfg.DB.SaveBlobsSidecar(ctx, blob); err != nil {
return err
}
}
}
}
return bFunc(ctx, blks, blockRoots)
}
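The data-availability check above appears inline in both the regular-sync and batched paths. Below is a minimal sketch of that logic factored into a single helper, assuming the ValidateBlobsSidecar, WithinDataAvailabilityBound, and block accessor signatures shown in these hunks; the package and helper names (syncsketch, checkBlobDataAvailability) are illustrative, not part of this diff.

package syncsketch

import (
	"fmt"

	blobs2 "github.com/prysmaticlabs/prysm/v3/consensus-types/blobs"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// checkBlobDataAvailability mirrors the inline logic above: skip blocks outside the DA window,
// require a sidecar whenever the block commits to blobs, and verify the sidecar against the
// block's kzg commitments before the block is imported.
func checkBlobDataAvailability(genesisUnix uint64, blk interfaces.SignedBeaconBlock, blobs map[types.Slot]*eth.BlobsSidecar) error {
	slot := blk.Block().Slot()
	if !slots.WithinDataAvailabilityBound(genesisUnix, slots.ToEpoch(slot)) {
		return nil // Outside the data-availability bound; nothing to verify.
	}
	kzgs, err := blk.Block().Body().BlobKzgCommitments()
	if err != nil {
		return fmt.Errorf("failed to get blob kzg commitments: %w", err)
	}
	if len(kzgs) == 0 {
		return nil // The block carries no blobs.
	}
	sidecar, ok := blobs[slot]
	if !ok {
		return fmt.Errorf("missing sidecar blob for slot %d", slot)
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return err
	}
	return blobs2.ValidateBlobsSidecar(slot, root, kzgs, sidecar)
}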

View File

@@ -35,7 +35,7 @@ type blockchainService interface {
// Config to set up the initial sync service.
type Config struct {
P2P p2p.P2P
DB db.ReadOnlyDatabase
DB db.NoHeadAccessDatabase
Chain blockchainService
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier

View File

@@ -3,6 +3,7 @@ package sync
import (
"context"
"encoding/hex"
"fmt"
"sort"
"sync"
"time"
@@ -12,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
p2ptypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/config/params"
blobs2 "github.com/prysmaticlabs/prysm/v3/consensus-types/blobs"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -19,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/equality"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"github.com/trailofbits/go-mutexasserts"
@@ -84,10 +87,18 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
span.End()
continue
}
var blobs []*ethpb.BlobsSidecar
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
blobs = s.pendingBlobsInCache(slot)
if len(bs) != len(blobs) {
log.Errorf("Pending blocks and blobs have different lengths: %d != %d", len(bs), len(blobs))
s.pendingQueueLock.RUnlock()
continue
}
}
s.pendingQueueLock.RUnlock()
// Loop through the pending queue and mark the potential parent blocks as seen.
for _, b := range bs {
for i, b := range bs {
if b == nil || b.IsNil() || b.Block().IsNil() {
span.End()
continue
@@ -175,6 +186,30 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
span.End()
continue
}
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
sc := blobs[i]
_, err := s.validateBlobsSidecar(sc, false)
if err != nil {
log.WithError(err).WithField("slot", b.Block().Slot()).Error("Could not validate blobs sidecar")
s.setBadBlock(ctx, blkRoot)
continue
}
kzgs, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).WithField("slot", b.Block().Slot()).Error("Could not get blob kzg commitments")
s.setBadBlock(ctx, blkRoot)
continue
}
if err := blobs2.ValidateBlobsSidecar(b.Block().Slot(), blkRoot, kzgs, sc); err != nil {
log.WithError(err).WithField("slot", b.Block().Slot()).Error("Could not validate blobs sidecar against block kzg commitments")
s.setBadBlock(ctx, blkRoot)
continue
}
if err := s.cfg.beaconDB.SaveBlobsSidecar(ctx, sc); err != nil {
log.WithError(err).WithField("slot", b.Block().Slot()).Error("Could not save blobs sidecar")
continue
}
}
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
@@ -257,10 +292,21 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
if len(roots) > int(params.BeaconNetworkConfig().MaxRequestBlocks) {
req = roots[:params.BeaconNetworkConfig().MaxRequestBlocks]
}
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Debug("Could not send recent block request")
if slots.ToEpoch(s.cfg.chain.CurrentSlot()) >= params.BeaconConfig().DenebForkEpoch {
if err := s.sendBlocksAndSidecarsRequest(ctx, &req, pid); err != nil {
log.WithError(err).Error("Could not send recent block and blob request, falling back to block only")
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Could not send recent block request after Deneb fork epoch")
}
}
} else {
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Debug("Could not send recent block request")
}
}
newRoots := make([][32]byte, 0, len(roots))
s.pendingQueueLock.RLock()
for _, rt := range roots {
@@ -286,15 +332,26 @@ func (s *Service) sortedPendingSlots() []types.Slot {
items := s.slotToPendingBlocks.Items()
ss := make([]types.Slot, 0, len(items))
ss1 := make([]types.Slot, 0, len(items))
for k := range items {
slot := cacheKeyToSlot(k)
ss = append(ss, slot)
ss1 = append(ss1, slot)
}
sort.Slice(ss, func(i, j int) bool {
return ss[i] < ss[j]
sort.Slice(ss1, func(i, j int) bool {
return ss1[i] < ss1[j]
})
return ss
items = s.slotToPendingBlobs.Items()
ss2 := make([]types.Slot, 0, len(items))
for k := range items {
slot := cacheKeyToSlot(k)
ss2 = append(ss2, slot)
}
sort.Slice(ss2, func(i, j int) bool {
return ss2[i] < ss2[j]
})
return ss1
}
// validatePendingSlots validates the pending blocks
@@ -310,10 +367,21 @@ func (s *Service) validatePendingSlots() error {
if s.slotToPendingBlocks == nil {
return errors.New("slotToPendingBlocks cache can't be nil")
}
if s.slotToPendingBlobs == nil {
return errors.New("slotToPendingBlobs cache can't be nil")
}
items := s.slotToPendingBlocks.Items()
for k := range items {
slot := cacheKeyToSlot(k)
blks := s.pendingBlocksInCache(slot)
blobs := s.pendingBlobsInCache(slot)
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
if len(blks) != len(blobs) {
return fmt.Errorf("blks and blobs are different length in pending queue: %d, %d", len(blks), len(blobs))
}
}
for _, b := range blks {
epoch := slots.ToEpoch(slot)
// remove all descendant blocks of old blocks
@@ -348,6 +416,7 @@ func (s *Service) clearPendingSlots() {
s.pendingQueueLock.Lock()
defer s.pendingQueueLock.Unlock()
s.slotToPendingBlocks.Flush()
s.slotToPendingBlobs.Flush()
s.seenPendingBlocks = make(map[[32]byte]bool)
}
@@ -361,13 +430,21 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b interfaces.Sign
return nil
}
blobs := s.pendingBlobsInCache(slot)
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
if len(blobs) != len(blks) {
return fmt.Errorf("blobs and blks are different length in pending queue for deletion: %d, %d", len(blobs), len(blks))
}
}
// Defensive check to ignore nil blocks
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
newBlks := make([]interfaces.SignedBeaconBlock, 0, len(blks))
for _, blk := range blks {
newBlobs := make([]*ethpb.BlobsSidecar, 0, len(blobs))
for i, blk := range blks {
blkPb, err := blk.Proto()
if err != nil {
return err
@@ -380,9 +457,11 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b interfaces.Sign
continue
}
newBlks = append(newBlks, blk)
if i < len(blobs) { // Guard against pre-Deneb slots that have no blob entries.
newBlobs = append(newBlobs, blobs[i])
}
}
if len(newBlks) == 0 {
s.slotToPendingBlocks.Delete(slotToCacheKey(slot))
s.slotToPendingBlobs.Delete(slotToCacheKey(slot))
delete(s.seenPendingBlocks, r)
return nil
}
@@ -392,13 +471,17 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b interfaces.Sign
if err := s.slotToPendingBlocks.Replace(slotToCacheKey(slot), newBlks, d); err != nil {
return err
}
if err := s.slotToPendingBlobs.Replace(slotToCacheKey(slot), newBlobs, d); err != nil {
return err
}
delete(s.seenPendingBlocks, r)
return nil
}
// Insert block to the list in the pending queue using the slot as key.
// Note: this helper is not thread safe.
func (s *Service) insertBlockToPendingQueue(_ types.Slot, b interfaces.SignedBeaconBlock, r [32]byte) error {
func (s *Service) insertBlkAndBlobToQueue(_ types.Slot, b interfaces.SignedBeaconBlock, r [32]byte, blob *ethpb.BlobsSidecar) error {
mutexasserts.AssertRWMutexLocked(&s.pendingQueueLock)
if s.seenPendingBlocks[r] {
@@ -408,6 +491,9 @@ func (s *Service) insertBlockToPendingQueue(_ types.Slot, b interfaces.SignedBea
if err := s.addPendingBlockToCache(b); err != nil {
return err
}
if err := s.addPeningBlobsToCache(blob); err != nil {
return err
}
s.seenPendingBlocks[r] = true
return nil
@@ -427,6 +513,20 @@ func (s *Service) pendingBlocksInCache(slot types.Slot) []interfaces.SignedBeaco
return blks
}
// This returns the blobs sidecars cached for the given slot key in slotToPendingBlobs.
func (s *Service) pendingBlobsInCache(slot types.Slot) []*ethpb.BlobsSidecar {
k := slotToCacheKey(slot)
value, ok := s.slotToPendingBlobs.Get(k)
if !ok {
return []*ethpb.BlobsSidecar{}
}
blobs, ok := value.([]*ethpb.BlobsSidecar)
if !ok {
return []*ethpb.BlobsSidecar{}
}
return blobs
}
// This adds input signed beacon block to slotToPendingBlocks cache.
func (s *Service) addPendingBlockToCache(b interfaces.SignedBeaconBlock) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
@@ -445,6 +545,23 @@ func (s *Service) addPendingBlockToCache(b interfaces.SignedBeaconBlock) error {
return nil
}
// This adds the input blobs sidecar to the slotToPendingBlobs cache.
func (s *Service) addPeningBlobsToCache(b *ethpb.BlobsSidecar) error {
if b == nil {
return nil
}
blobs := s.pendingBlobsInCache(b.BeaconBlockSlot)
if len(blobs) >= maxBlocksPerSlot {
return nil
}
blobs = append(blobs, b)
k := slotToCacheKey(b.BeaconBlockSlot)
s.slotToPendingBlobs.Set(k, blobs, pendingBlockExpTime)
return nil
}
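For reference, a tiny standalone sketch of the slot-keyed cache pattern used by slotToPendingBlobs above, assuming gcache is github.com/patrickmn/go-cache (whose Get/Set/Items/Replace API matches the calls in this file) and that cache keys are simply the decimal slot; the real slotToCacheKey helper is not shown in this diff.

package main

import (
	"fmt"
	"strconv"
	"time"

	gcache "github.com/patrickmn/go-cache" // Assumed to be the gcache used above.
)

type blobsSidecar struct { // Stand-in for ethpb.BlobsSidecar.
	BeaconBlockSlot uint64
}

func main() {
	c := gcache.New(10*time.Minute /* exp time */, 20*time.Minute /* prune time */)

	// Append a sidecar to the per-slot list, as addPeningBlobsToCache does.
	key := strconv.FormatUint(12345, 10) // Assumed key format.
	var blobs []*blobsSidecar
	if v, ok := c.Get(key); ok {
		blobs, _ = v.([]*blobsSidecar)
	}
	blobs = append(blobs, &blobsSidecar{BeaconBlockSlot: 12345})
	c.Set(key, blobs, 10*time.Minute)

	// Read it back, as pendingBlobsInCache does.
	if v, ok := c.Get(key); ok {
		fmt.Println("pending blobs for slot 12345:", len(v.([]*blobsSidecar)))
	}
}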
// Returns true if the genesis time has been set in chain service.
// Without the genesis time, the chain does not start.
func (s *Service) isGenesisTimeSet() bool {

View File

@@ -81,7 +81,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
// Add b2 to the cache
wsb, err := blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
@@ -90,7 +90,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
// Add b1 to the cache
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1Root, nil))
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1)
nBlock := util.NewBeaconBlock()
@@ -101,7 +101,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
wsb, err = blocks.NewSignedBeaconBlock(nBlock)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot))
require.NoError(t, r.insertBlkAndBlobToQueue(nBlock.Block.Slot, wsb, nRoot, nil))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
@@ -153,7 +153,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
// Add b2 to the cache
wsb, err := blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
@@ -162,7 +162,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
// Add b1 to the cache
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1Root, nil))
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1)
nBlock := util.NewBeaconBlock()
@@ -173,7 +173,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
wsb, err = blocks.NewSignedBeaconBlock(nBlock)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot))
require.NoError(t, r.insertBlkAndBlobToQueue(nBlock.Block.Slot, wsb, nRoot, nil))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
@@ -225,7 +225,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T)
// Add b2 to the cache
wsb, err := blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
@@ -234,7 +234,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T)
// Add b1 to the cache
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1Root, nil))
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1)
nBlock := util.NewBeaconBlock()
@@ -245,7 +245,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T)
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
wsb, err = blocks.NewSignedBeaconBlock(nBlock)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot))
require.NoError(t, r.insertBlkAndBlobToQueue(nBlock.Block.Slot, wsb, nRoot, nil))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
@@ -287,24 +287,24 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(b0)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b0.Block.Slot, wsb, b0r))
require.NoError(t, r.insertBlkAndBlobToQueue(b0.Block.Slot, wsb, b0r, nil))
require.Equal(t, 1, len(r.pendingBlocksInCache(b0.Block.Slot)), "Block was not added to map")
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1r))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1r, nil))
require.Equal(t, 1, len(r.pendingBlocksInCache(b1.Block.Slot)), "Block was not added to map")
// Add duplicate block which should not be saved.
wsb, err = blocks.NewSignedBeaconBlock(b0)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b0.Block.Slot, wsb, b0r))
require.NoError(t, r.insertBlkAndBlobToQueue(b0.Block.Slot, wsb, b0r, nil))
require.Equal(t, 1, len(r.pendingBlocksInCache(b0.Block.Slot)), "Block was added to map")
// Add duplicate block which should not be saved.
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1r))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1r, nil))
require.Equal(t, 1, len(r.pendingBlocksInCache(b1.Block.Slot)), "Block was added to map")
}
@@ -344,7 +344,7 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) {
// Add b3 to the cache
wsb, err := blocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b3.Block.Slot, wsb, b3Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
@@ -436,10 +436,10 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
wsb, err := blocks.NewSignedBeaconBlock(b4)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b4.Block.Slot, wsb, b4Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b4.Block.Slot, wsb, b4Root, nil))
wsb, err = blocks.NewSignedBeaconBlock(b5)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, wsb, b5Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b5.Block.Slot, wsb, b5Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
@@ -450,7 +450,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
// Add b3 to the cache
wsb, err = blocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b3.Block.Slot, wsb, b3Root, nil))
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3)
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
@@ -462,7 +462,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
// Add b2 to the cache
wsb, err = blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b2)
@@ -535,16 +535,16 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
wsb, err = blocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b3.Block.Slot, wsb, b3Root, nil))
wsb, err = blocks.NewSignedBeaconBlock(b4)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b4.Block.Slot, wsb, b4Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b4.Block.Slot, wsb, b4Root, nil))
wsb, err = blocks.NewSignedBeaconBlock(b5)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, wsb, b5Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b5.Block.Slot, wsb, b5Root, nil))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
@@ -560,16 +560,16 @@ func TestService_sortedPendingSlots(t *testing.T) {
var lastSlot types.Slot = math.MaxUint64
wsb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: lastSlot}}))
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(lastSlot, wsb, [32]byte{1}))
require.NoError(t, r.insertBlkAndBlobToQueue(lastSlot, wsb, [32]byte{1}, nil))
wsb, err = blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: lastSlot - 3}}))
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-3, wsb, [32]byte{2}))
require.NoError(t, r.insertBlkAndBlobToQueue(lastSlot-3, wsb, [32]byte{2}, nil))
wsb, err = blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: lastSlot - 5}}))
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-5, wsb, [32]byte{3}))
require.NoError(t, r.insertBlkAndBlobToQueue(lastSlot-5, wsb, [32]byte{3}, nil))
wsb, err = blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: lastSlot - 2}}))
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-2, wsb, [32]byte{4}))
require.NoError(t, r.insertBlkAndBlobToQueue(lastSlot-2, wsb, [32]byte{4}, nil))
want := []types.Slot{lastSlot - 5, lastSlot - 3, lastSlot - 2, lastSlot}
assert.DeepEqual(t, want, r.sortedPendingSlots(), "Unexpected pending slots list")
@@ -680,19 +680,19 @@ func TestService_AddPendingBlockToQueueOverMax(t *testing.T) {
b2.Block.StateRoot = []byte{'b'}
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(0, wsb, [32]byte{}))
require.NoError(t, r.insertBlkAndBlobToQueue(0, wsb, [32]byte{}, nil))
wsb, err = blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(0, wsb, [32]byte{1}))
require.NoError(t, r.insertBlkAndBlobToQueue(0, wsb, [32]byte{1}, nil))
wsb, err = blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(0, wsb, [32]byte{2}))
require.NoError(t, r.insertBlkAndBlobToQueue(0, wsb, [32]byte{2}, nil))
b3 := ethpb.CopySignedBeaconBlock(b)
b3.Block.StateRoot = []byte{'c'}
wsb, err = blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(0, wsb, [32]byte{3}))
require.NoError(t, r.insertBlkAndBlobToQueue(0, wsb, [32]byte{3}, nil))
require.Equal(t, maxBlocksPerSlot, len(r.pendingBlocksInCache(0)))
}
@@ -758,15 +758,15 @@ func TestService_ProcessPendingBlockOnCorrectSlot(t *testing.T) {
// Add block1 for slot1
wsb, err := blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b1.Block.Slot, wsb, b1Root, nil))
// Add block2 for slot2
wsb, err = blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b2.Block.Slot, wsb, b2Root, nil))
// Add block3 for slot3
wsb, err = blocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b3.Block.Slot, wsb, b3Root, nil))
// processPendingBlocks should process only blocks of the current slot. i.e. slot 1.
// Then check if the other two blocks are still in the pendingQueue.
@@ -828,7 +828,7 @@ func TestService_ProcessBadPendingBlocks(t *testing.T) {
assert.NoError(t, err)
// Add block1 for slot 55
require.NoError(t, r.insertBlockToPendingQueue(b.Block.Slot, bA, b1Root))
require.NoError(t, r.insertBlkAndBlobToQueue(b.Block.Slot, bA, b1Root, nil))
bB, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
assert.NoError(t, err)
// remove with a different block from the same slot.

View File

@@ -20,11 +20,17 @@ const defaultBurstLimit = 5
const leakyBucketPeriod = 1 * time.Second
// Only allow in 2 batches per minute.
const blockBucketPeriod = 30 * time.Second
const blockBucketPeriod = 5 * time.Second
// Dummy topic to validate all incoming rpc requests.
const rpcLimiterTopic = "rpc-limiter-topic"
const (
// TODO: make this configurable
blobsTransferByteRate = 1 << 18
blobsTransferThresholdByteRate = 1 << 20
)
type limiter struct {
limiterMap map[string]*leakybucket.Collector
p2p p2p.P2P
@@ -59,13 +65,17 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
// Collector for V2
blockCollectorV2 := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, blockBucketPeriod, false /* deleteEmptyBuckets */)
blobCollector := leakybucket.NewCollector(blobsTransferByteRate, blobsTransferThresholdByteRate, blockBucketPeriod, false /* deleteEmptyBuckets */)
// BlocksByRoots requests
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
topicMap[addEncoding(p2p.RPCBeaconBlockAndBlobsSidecarByRootTopicV1)] = blobCollector
// BlockByRange requests
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV1)] = blockCollector
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV2)] = blockCollectorV2
topicMap[addEncoding(p2p.RPCBlobsSidecarsByRangeTopicV1)] = blobCollector
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
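Under these constants the blob collector budgets bytes rather than block counts. A rough back-of-the-envelope sketch follows; it assumes the leaky bucket refills blobsTransferByteRate bytes every blockBucketPeriod (5 s in the new constant above) and caps at blobsTransferThresholdByteRate, and it borrows the ~512 KiB per-sidecar charge (avgSidecarBlobsTransferBytes) from the range handler later in this diff. The exact leakybucket semantics are not shown here, so treat the numbers as illustrative.

package main

import "fmt"

func main() {
	const (
		blobsTransferByteRate          = 1 << 18 // 256 KiB assumed refill per bucket period.
		blobsTransferThresholdByteRate = 1 << 20 // 1 MiB assumed bucket capacity.
		avgSidecarBlobsTransferBytes   = 2 << 18 // ~512 KiB charged per sidecar by the range handler.
		blockBucketPeriodSeconds       = 5
	)
	fmt.Printf("burst budget: ~%d sidecars\n", blobsTransferThresholdByteRate/avgSidecarBlobsTransferBytes)
	fmt.Printf("steady state: roughly one sidecar every %d seconds\n",
		(avgSidecarBlobsTransferBytes/blobsTransferByteRate)*blockBucketPeriodSeconds)
}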

View File

@@ -49,6 +49,9 @@ func (s *Service) registerRPCHandlers() {
s.pingHandler,
)
s.registerRPCHandlersAltair()
if currEpoch >= params.BeaconConfig().DenebForkEpoch {
s.registerRPCHandlersDeneb()
}
return
}
s.registerRPC(
@@ -83,6 +86,7 @@ func (s *Service) registerRPCHandlersAltair() {
p2p.RPCBlocksByRangeTopicV2,
s.beaconBlocksByRangeRPCHandler,
)
// TODO(Deneb): Unregister this post Deneb fork epoch.
s.registerRPC(
p2p.RPCBlocksByRootTopicV2,
s.beaconBlocksRootRPCHandler,
@@ -93,6 +97,17 @@ func (s *Service) registerRPCHandlersAltair() {
)
}
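// registerRPCHandlersDeneb registers the Deneb RPC handlers: blobs sidecars by range and
// beacon block with blobs sidecar by root.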
func (s *Service) registerRPCHandlersDeneb() {
s.registerRPC(
p2p.RPCBlobsSidecarsByRangeTopicV1,
s.blobsSidecarsByRangeRPCHandler,
)
s.registerRPC(
p2p.RPCBeaconBlockAndBlobsSidecarByRootTopicV1,
s.beaconBlockAndBlobsSidecarByRootRPCHandler,
)
}
// Remove all v1 Stream handlers that are no longer supported
// from altair onwards.
func (s *Service) unregisterPhase0Handlers() {

View File

@@ -0,0 +1,102 @@
package sync
import (
"context"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
// beaconBlockAndBlobsSidecarByRootRPCHandler looks up the requested beacon blocks and blobs sidecars in the database for each given block root.
func (s *Service) beaconBlockAndBlobsSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.beaconBlockAndBlobsSidecarByRootRPCHandler")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
SetRPCStreamDeadlines(stream)
log := log.WithField("handler", "beacon_block_and_blobs_sidecar_by_root")
rawMsg, ok := msg.(*types.BeaconBlockByRootsReq)
if !ok {
return errors.New("message is not type BeaconBlockByRootsReq")
}
blockRoots := *rawMsg
if err := s.rateLimiter.validateRequest(stream, uint64(len(blockRoots))); err != nil {
return err
}
if len(blockRoots) == 0 {
// Add to rate limiter in the event no
// roots are requested.
s.rateLimiter.add(stream, 1)
s.writeErrorResponseToStream(responseCodeInvalidRequest, "no block roots provided in request", stream)
return errors.New("no block roots provided")
}
if uint64(len(blockRoots)) > params.BeaconNetworkConfig().MaxRequestBlocks {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.writeErrorResponseToStream(responseCodeInvalidRequest, "requested more than the max block limit", stream)
return errors.New("requested more than the max block limit")
}
s.rateLimiter.add(stream, int64(len(blockRoots)))
for _, root := range blockRoots {
blk, err := s.cfg.beaconDB.Block(ctx, root)
if err != nil {
log.WithError(err).Debug("Could not fetch block")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}
sidecar, err := s.cfg.beaconDB.BlobsSidecar(ctx, root)
if err != nil {
log.WithError(err).Debug("Could not fetch sidecar")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream) // TODO(Deneb): Return unavailable?
return err
}
// TODO(Deneb): Reconstruct blind block
pb, err := blk.PbDenebBlock()
if err != nil {
log.WithError(err).Debug("Could not fetch block")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}
if err := WriteBlockAndBlobsSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), &ethpb.SignedBeaconBlockAndBlobsSidecar{BeaconBlock: pb, BlobsSidecar: sidecar}); err != nil {
log.WithError(err).Debug("Could not send block and sidecar")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}
}
closeStream(stream, log)
return nil
}
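// sendBlocksAndSidecarsRequest requests the given block roots, with their blobs sidecars, from a
// peer and inserts each returned block and sidecar pair into the pending queue.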
func (s *Service) sendBlocksAndSidecarsRequest(ctx context.Context, blockRoots *types.BeaconBlockByRootsReq, id peer.ID) error {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()
_, err := SendBlocksAndSidecarsByRootRequest(ctx, s.cfg.chain, s.cfg.p2p, id, blockRoots, func(blkAndSidecar *ethpb.SignedBeaconBlockAndBlobsSidecar) error {
blk, err := blocks.NewSignedBeaconBlock(blkAndSidecar.BeaconBlock)
if err != nil {
return err
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return err
}
s.pendingQueueLock.Lock()
defer s.pendingQueueLock.Unlock()
if err := s.insertBlkAndBlobToQueue(blk.Block().Slot(), blk, blkRoot, blkAndSidecar.BlobsSidecar); err != nil {
return err
}
return nil
})
return err
}

View File

@@ -182,6 +182,8 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
return err
}
copy(blks[blindedIndex:], reconstructedBlks)
// TODO(Deneb): Handle blind construction for Deneb
}
for _, b := range blks {

View File

@@ -26,7 +26,7 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
}
s.pendingQueueLock.Lock()
defer s.pendingQueueLock.Unlock()
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
if err := s.insertBlkAndBlobToQueue(blk.Block().Slot(), blk, blkRoot, nil); err != nil {
return err
}
return nil

View File

@@ -0,0 +1,104 @@
package sync
import (
"context"
"time"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
p2ptypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
// We assume a cost of 512 KiB per sidecar served in response to a range request.
const avgSidecarBlobsTransferBytes = 2 << 18
type BlobsSidecarProcessor func(sidecar *pb.BlobsSidecar) error
// blobsSidecarsByRangeRPCHandler looks up the requested blobs sidecars in the database, starting from a given slot.
func (s *Service) blobsSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.BlobsSidecarsByRangeHandler")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()
SetRPCStreamDeadlines(stream)
r, ok := msg.(*pb.BlobsSidecarsByRangeRequest)
if !ok {
return errors.New("message is not type *pb.BlobsSidecarsByRangeRequest")
}
startSlot := r.StartSlot
count := r.Count
endSlot := startSlot.Add(count)
var numBlobs uint64
maxRequestBlobsSidecars := params.BeaconNetworkConfig().MaxRequestBlobsSidecars
for slot := startSlot; slot < endSlot && numBlobs < maxRequestBlobsSidecars; slot = slot.Add(1) {
if err := s.rateLimiter.validateRequest(stream, uint64(avgSidecarBlobsTransferBytes)); err != nil {
return err
}
sidecars, err := s.cfg.beaconDB.BlobsSidecarsBySlot(ctx, slot)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
if len(sidecars) == 0 {
continue
}
var outLen int
for _, sidecar := range sidecars {
outLen += estimateBlobsSidecarCost(sidecar)
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobsSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sidecar); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr
}
}
numBlobs++
s.rateLimiter.add(stream, int64(outLen))
// Short-circuit immediately once we've sent the last blob.
if slot.Add(1) >= endSlot {
break
}
key := stream.Conn().RemotePeer().String()
sidecarLimiter, err := s.rateLimiter.topicCollector(string(stream.Protocol()))
if err != nil {
return err
}
// Throttling - wait until we have enough tokens to send the next blobs
if sidecarLimiter.Remaining(key) < avgSidecarBlobsTransferBytes {
timer := time.NewTimer(sidecarLimiter.TillEmpty(key))
select {
case <-ctx.Done():
timer.Stop()
return ctx.Err()
case <-timer.C:
timer.Stop()
}
}
}
closeStream(stream, log)
return nil
}
func estimateBlobsSidecarCost(sidecar *pb.BlobsSidecar) int {
// This represents the fixed cost (in bytes) of the beacon_block_root and beacon_block_slot fields in the sidecar
const overheadCost = 32 + 8
cost := overheadCost
for _, blob := range sidecar.Blobs {
cost += len(blob.Data)
}
return cost
}
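As a sanity check of the 512 KiB average above: an EIP-4844 blob is 4096 field elements of 32 bytes each (128 KiB), so a sidecar carrying four blobs (an illustrative count, not a value from this diff) costs just over the avgSidecarBlobsTransferBytes constant.

package main

import "fmt"

func main() {
	const (
		overheadCost = 32 + 8    // beacon_block_root + beacon_block_slot, as in estimateBlobsSidecarCost above.
		blobSize     = 4096 * 32 // 131072 bytes per blob (4096 field elements x 32 bytes, per EIP-4844).
		numBlobs     = 4         // Illustrative blob count.
	)
	cost := overheadCost + numBlobs*blobSize
	fmt.Printf("estimated sidecar cost: %d bytes (avgSidecarBlobsTransferBytes = %d)\n", cost, 2<<18)
}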

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
)
@@ -53,6 +54,20 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher
return err
}
obtainedCtx = digest[:]
case version.Capella:
valRoot := chain.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Deneb:
valRoot := chain.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
}
if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
@@ -73,6 +88,105 @@ func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2
return readResponseChunk(stream, chain, p2p)
}
// WriteBlobsSidecarChunk writes blobs chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlobsSidecarChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, blobs *ethpb.BlobsSidecar) error {
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := chain.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
if err != nil {
return err
}
if err := writeContextToStream(ctxBytes[:], stream, chain); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, blobs)
return err
}
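// WriteBlockAndBlobsSidecarChunk writes a signed beacon block together with its blobs sidecar to
// the stream, prefixed with the Deneb fork-digest context bytes.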
func WriteBlockAndBlobsSidecarChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, b *ethpb.SignedBeaconBlockAndBlobsSidecar) error {
SetStreamWriteDeadline(stream, defaultWriteDuration)
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := chain.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
if err != nil {
return err
}
if err := writeContextToStream(ctxBytes[:], stream, chain); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, b)
return err
}
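// ReadChunkedBlockAndBlobsSidecar reads a chunked signed beacon block and blobs sidecar response from the stream.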
func ReadChunkedBlockAndBlobsSidecar(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P, isFirstChunk bool) (*ethpb.SignedBeaconBlockAndBlobsSidecar, error) {
var (
code uint8
errMsg string
err error
)
if isFirstChunk {
code, errMsg, err = ReadStatusCode(stream, p2p.Encoding())
} else {
SetStreamReadDeadline(stream, respTimeout)
code, errMsg, err = readStatusCodeNoDeadline(stream, p2p.Encoding())
}
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read the fork-digest context bytes and use them to determine the response type.
rpcCtx, err := readContextFromStream(stream, chain)
if err != nil {
return nil, err
}
b, err := extractBeaconBlockAndBlobsSidecarDataType(rpcCtx, chain)
if err != nil {
return nil, err
}
err = p2p.Encoding().DecodeWithMaxLength(stream, b)
return b, err
}
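// ReadChunkedBlobsSidecar reads a chunked blobs sidecar response from the stream.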
func ReadChunkedBlobsSidecar(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider, isFirstChunk bool) (*ethpb.BlobsSidecar, error) {
var (
code uint8
errMsg string
err error
)
if isFirstChunk {
code, errMsg, err = ReadStatusCode(stream, p2p.Encoding())
} else {
SetStreamReadDeadline(stream, respTimeout)
code, errMsg, err = readStatusCodeNoDeadline(stream, p2p.Encoding())
}
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read the fork-digest context bytes and use them to determine the response type.
rpcCtx, err := readContextFromStream(stream, chain)
if err != nil {
return nil, err
}
sidecar, err := extractBlobsSidecarDataType(rpcCtx, chain)
if err != nil {
return nil, err
}
err = p2p.Encoding().DecodeWithMaxLength(stream, sidecar)
return sidecar, err
}
// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
// it.
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.SignedBeaconBlock, error) {
@@ -142,3 +256,33 @@ func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfac
}
return nil, errors.New("no valid digest matched")
}
func extractBeaconBlockAndBlobsSidecarDataType(digest []byte, chain blockchain.ForkFetcher) (*ethpb.SignedBeaconBlockAndBlobsSidecar, error) {
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := chain.GenesisValidatorsRoot()
rDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, vRoot[:])
if err != nil {
return nil, err
}
if rDigest != bytesutil.ToBytes4(digest) {
return nil, errors.Errorf("invalid digest returned, wanted %x but received %x", rDigest, digest)
}
return &ethpb.SignedBeaconBlockAndBlobsSidecar{}, nil
}
func extractBlobsSidecarDataType(digest []byte, chain blockchain.ForkFetcher) (*ethpb.BlobsSidecar, error) {
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := chain.GenesisValidatorsRoot()
rDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, vRoot[:])
if err != nil {
return nil, err
}
if rDigest != bytesutil.ToBytes4(digest) {
return nil, errors.Errorf("invalid digest returned, wanted %x but received %x", rDigest, digest)
}
return &ethpb.BlobsSidecar{}, nil
}
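Both extract helpers above key off the same 4-byte fork digest that the write helpers put on the wire as context bytes. A minimal sketch of deriving that digest is below; the genesis validators root is a placeholder, the signing import path is assumed (it is not shown in this hunk), and DenebForkVersion exists only on this branch's config.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing" // Assumed import path for the signing package used above.
	"github.com/prysmaticlabs/prysm/v3/config/params"
)

func main() {
	var genesisValidatorsRoot [32]byte // Placeholder; the real value comes from chain.GenesisValidatorsRoot().
	digest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, genesisValidatorsRoot[:])
	if err != nil {
		panic(err)
	}
	fmt.Printf("deneb context bytes: %#x\n", digest)
}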

View File

@@ -23,6 +23,8 @@ var ErrInvalidFetchedData = errors.New("invalid data returned from peer")
// blocks even before all blocks are ready.
type BeaconBlockProcessor func(block interfaces.SignedBeaconBlock) error
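// BeaconBlockAndSidecarProcessor is a block-with-sidecar processing function, applied to each
// block and blobs sidecar pair received from a peer.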
type BeaconBlockAndSidecarProcessor func(blockAndSidecar *pb.SignedBeaconBlockAndBlobsSidecar) error
// SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any.
func SendBeaconBlocksByRangeRequest(
ctx context.Context, chain blockchain.ForkFetcher, p2pProvider p2p.SenderEncoder, pid peer.ID,
@@ -129,3 +131,98 @@ func SendBeaconBlocksByRootRequest(
}
return blocks, nil
}
func SendBlocksAndSidecarsByRootRequest(
ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID,
req *p2ptypes.BeaconBlockByRootsReq, processor BeaconBlockAndSidecarProcessor,
) ([]*pb.SignedBeaconBlockAndBlobsSidecar, error) {
topic, err := p2p.TopicFromMessage(p2p.BeaconBlockAndBlobsSidecarByRootName, slots.ToEpoch(chain.CurrentSlot()))
if err != nil {
return nil, err
}
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
blkAndSidecars := make([]*pb.SignedBeaconBlockAndBlobsSidecar, 0, len(*req))
process := func(b *pb.SignedBeaconBlockAndBlobsSidecar) error {
blkAndSidecars = append(blkAndSidecars, b)
if processor != nil {
return processor(b)
}
return nil
}
for i := 0; i < len(*req); i++ {
// Exit if peer sends more than max request blocks.
if uint64(i) >= params.BeaconNetworkConfig().MaxRequestBlocks {
break
}
isFirstChunk := i == 0
blkAndSidecar, err := ReadChunkedBlockAndBlobsSidecar(stream, chain, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
if err := process(blkAndSidecar); err != nil {
return nil, err
}
}
return blkAndSidecars, nil
}
func SendBlobsSidecarsByRangeRequest(
ctx context.Context, chain blockchain.ForkFetcher, p2pProvider p2p.SenderEncoder, pid peer.ID,
req *pb.BlobsSidecarsByRangeRequest, sidecarProcessor BlobsSidecarProcessor) ([]*pb.BlobsSidecar, error) {
topic, err := p2p.TopicFromMessage(p2p.BlobsSidecarsByRangeMessageName, slots.ToEpoch(chain.CurrentSlot()))
if err != nil {
return nil, err
}
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
var sidecars []*pb.BlobsSidecar
process := func(sidecar *pb.BlobsSidecar) error {
sidecars = append(sidecars, sidecar)
if sidecarProcessor != nil {
return sidecarProcessor(sidecar)
}
return nil
}
var prevSlot types.Slot
for i := uint64(0); ; i++ {
isFirstChunk := len(sidecars) == 0
sidecar, err := ReadChunkedBlobsSidecar(stream, chain, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
if i >= req.Count || i >= params.BeaconNetworkConfig().MaxRequestBlobsSidecars {
return nil, ErrInvalidFetchedData
}
if sidecar.BeaconBlockSlot < req.StartSlot || sidecar.BeaconBlockSlot >= req.StartSlot.Add(req.Count) {
return nil, ErrInvalidFetchedData
}
// Assert that slots are strictly increasing and never out of order.
if prevSlot >= sidecar.BeaconBlockSlot {
return nil, ErrInvalidFetchedData
}
prevSlot = sidecar.BeaconBlockSlot
if err := process(sidecar); err != nil {
return nil, err
}
}
return sidecars, nil
}
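A hedged caller-side sketch of this range request: ask peer pid for up to count sidecars starting at start, and persist each one as it is decoded. The wrapper name requestBlobsRange and the saveFn parameter are illustrative; the request fields and the sender signature are taken from this file, while the beacon-chain/sync import path is assumed.

package blobfetch

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
	prysmsync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync" // Assumed import path of the package above.
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// requestBlobsRange (hypothetical) fetches blobs sidecars in [start, start+count) from one peer
// and hands each sidecar to saveFn as soon as it is read off the stream.
func requestBlobsRange(
	ctx context.Context,
	chain blockchain.ForkFetcher,
	p2pProvider p2p.SenderEncoder,
	pid peer.ID,
	start types.Slot,
	count uint64,
	saveFn func(*pb.BlobsSidecar) error,
) ([]*pb.BlobsSidecar, error) {
	req := &pb.BlobsSidecarsByRangeRequest{StartSlot: start, Count: count}
	return prysmsync.SendBlobsSidecarsByRangeRequest(ctx, chain, p2pProvider, pid, req, func(sc *pb.BlobsSidecar) error {
		return saveFn(sc) // Persist (or otherwise process) each sidecar incrementally.
	})
}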

View File

@@ -109,6 +109,7 @@ type Service struct {
ctx context.Context
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
slotToPendingBlobs *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
subHandler *subTopicHandler
@@ -143,6 +144,7 @@ type Service struct {
// NewService initializes new regular sync service.
func NewService(ctx context.Context, opts ...Option) *Service {
c := gcache.New(pendingBlockExpTime /* exp time */, 2*pendingBlockExpTime /* prune time */)
b := gcache.New(pendingBlockExpTime /* exp time */, 2*pendingBlockExpTime /* prune time */)
ctx, cancel := context.WithCancel(ctx)
r := &Service{
ctx: ctx,
@@ -150,6 +152,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
chainStarted: abool.New(),
cfg: &config{},
slotToPendingBlocks: c,
slotToPendingBlobs: b,
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),

View File

@@ -13,11 +13,14 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/container/slice"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
@@ -130,6 +133,16 @@ func (s *Service) registerSubscribers(epoch types.Epoch, digest [4]byte) {
digest,
)
}
// New Gossip Topic in Deneb
if epoch >= params.BeaconConfig().DenebForkEpoch {
s.subscribe(
p2p.BlockAndBlobsSubnetTopicFormat,
s.validateBeaconBlockAndBlobsPubSub,
s.beaconBlockAndBlobsSubscriber,
digest,
)
}
}
// subscribe to a given topic with a given validator and subscription handler.
@@ -279,6 +292,31 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
if features.Get().EnableFullSSZDataLogging {
fields["message"] = hexutil.Encode(msg.Data)
}
d, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Warn("Could not decode pubsub message")
} else {
switch bb := d.(type) {
case *ethpb.SignedBeaconBlockAndBlobsSidecar:
interop.WriteBadBlobsToDisk("p2p", bb.BlobsSidecar)
sb, err := blocks.NewSignedBeaconBlock(bb.BeaconBlock)
if err != nil {
log.WithError(err).Warn("Could not decode beacon block")
} else {
interop.WriteBlockToDisk("p2p", sb, true)
}
case interfaces.SignedBeaconBlock:
interop.WriteBlockToDisk("p2p", bb, true)
}
}
log.WithError(err).WithFields(fields).Debugf("Gossip message was rejected")
messageFailedValidationCounter.WithLabelValues(topic).Inc()
}

View File

@@ -35,7 +35,7 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
if r != [32]byte{} {
s.setBadBlock(ctx, r) // Setting head block as bad.
} else {
interop.WriteBlockToDisk(signed, true /*failed*/)
interop.WriteBlockToDisk("receive_block", signed, true /*failed*/)
s.setBadBlock(ctx, root)
}
}

View File

@@ -0,0 +1,64 @@
package sync
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
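// beaconBlockAndBlobsSubscriber handles a gossiped signed beacon block with its blobs sidecar:
// the block is forwarded to the blockchain service and the sidecar is persisted to the beacon DB.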
func (s *Service) beaconBlockAndBlobsSubscriber(ctx context.Context, msg proto.Message) error {
m, ok := msg.(*ethpb.SignedBeaconBlockAndBlobsSidecar)
if !ok {
return fmt.Errorf("message was not type *eth.SignedBeaconBlockAndBlobsSidecar, type=%T", msg)
}
if m == nil {
return errors.New("nil blobs sidecar message")
}
signed, err := blocks.NewSignedBeaconBlock(m.BeaconBlock)
if err != nil {
return err
}
if err := blocks.BeaconBlockIsNil(signed); err != nil {
return err
}
s.setSeenBlockIndexSlot(signed.Block().Slot(), signed.Block().ProposerIndex())
block := signed.Block()
root, err := block.HashTreeRoot()
if err != nil {
return err
}
if err := s.cfg.chain.ReceiveBlock(ctx, signed, root); err != nil {
if blockchain.IsInvalidBlock(err) {
r := blockchain.InvalidBlockRoot(err)
if r != [32]byte{} {
s.setBadBlock(ctx, r) // Setting head block as bad.
} else {
interop.WriteBlockToDisk("receive_block", signed, true /*failed*/)
s.setBadBlock(ctx, root)
}
}
// Set the returned invalid ancestors as bad.
for _, root := range blockchain.InvalidAncestorRoots(err) {
s.setBadBlock(ctx, root)
}
return err
}
err = s.cfg.beaconDB.SaveBlobsSidecar(ctx, m.BlobsSidecar)
if err != nil {
return err
}
return nil
}

Some files were not shown because too many files have changed in this diff.