Compare commits

..

336 Commits

Author SHA1 Message Date
terence tsao
d9affb927b Init map 2023-04-05 14:41:58 -07:00
Kasey Kirkham
b3d509a5a1 fixing several initial-sync blob bugs 2023-04-05 16:13:45 -05:00
Kasey Kirkham
a8d154f75e 🤦 2023-04-05 14:39:01 -05:00
Kasey Kirkham
8bd5a265dd unblinded blocks had serious commitment issues 2023-04-05 14:30:04 -05:00
Kasey Kirkham
8dc84411b2 bug fixes and chatty logs 2023-04-04 16:50:52 -05:00
Kasey Kirkham
0d0147b0cb deneb changes for exec engine api
Unblinding blocks without these changes resulted in mutating the block
type, transforming deneb blocks into capella blocks.
Note the debug logs in beacon-chain/sync/rpc_chunked_response.go
2023-04-04 15:45:07 -05:00
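
The commit above describes the bug class it fixes: rebuilding a full block from a blinded one while hard-coding the fork, so Deneb blocks came back as Capella blocks. Below is a minimal, hedged Go sketch of the version-preserving idea only; the types (blindedBlock, fullBlock, blockVersion) are hypothetical stand-ins, not Prysm's actual block interfaces.

```go
package main

import "fmt"

// Hypothetical, simplified types for illustration only; these are not
// Prysm's real block interfaces.
type blockVersion int

const (
	capella blockVersion = iota
	deneb
)

type blindedBlock struct {
	version       blockVersion
	payloadHeader string
}

type fullBlock struct {
	version blockVersion
	payload string
}

// unblind rebuilds a full block from a blinded block plus the payload returned
// by the execution engine. The point from the commit above: the full block
// inherits the blinded block's version instead of assuming Capella, otherwise
// a Deneb block would silently turn into a Capella block.
func unblind(b blindedBlock, payload string) fullBlock {
	return fullBlock{
		version: b.version, // carried over, not hard-coded to capella
		payload: payload,
	}
}

func main() {
	fb := unblind(blindedBlock{version: deneb, payloadHeader: "0xheader"}, "0xpayload")
	fmt.Println(fb.version == deneb) // true
}
```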
Kasey Kirkham
60ab9f22c1 simplify batch block logic 2023-04-03 14:34:56 -05:00
Kasey Kirkham
eede6ec2c4 WIP debugging 2023-04-03 13:38:46 -05:00
Kasey Kirkham
bea41af396 fixes 2023-03-31 09:42:23 -05:00
Kasey Kirkham
8a0ef4a85c janky initial-sync blob downloading 2023-03-30 20:39:53 -05:00
terence tsao
7c9bb47f99 Implement send blob by range request 2023-03-30 13:39:54 -05:00
Radosław Kapka
d5dadc75e4 Block rewards API endpoint (#12020)
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-30 13:50:33 +02:00
kasey
12d0a4ceeb Separate EE blob type (#12203)
* BlobSidecar.Blob should be a byte slice

* remove engine Blob type from db

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-03-28 20:12:44 -05:00
terencechain
18743ec30f Blob by root fixes (#12202) 2023-03-28 16:39:30 -07:00
kasey
dc8ba012e9 MarshalSSZ/UnmarshalSSZ for blob sidecar req (#12200)
* ser/des methods for BlobSidecarsByRootReq

* lint

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-03-28 14:43:36 +00:00
Kasey Kirkham
8dddf1eaeb simplifying blinded block reconstruction code 2023-03-27 20:20:14 -05:00
kasey
66727a853f separate block batcher code from blocks rpc 2023-03-27 16:30:49 -05:00
terence tsao
10fadd0ac3 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-27 13:51:32 -07:00
terence tsao
b9ec6837ab Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-27 13:48:54 -07:00
kasey
0254d31810 WIP: BlobDB changes (#12193)
* hide ethpb.BlobSidecars in kv pkg only

* Add variadic indices arg to blob retrieval methods

* raul review

---------

Co-authored-by: kasey <kasey@users.noreply.github.com>
2023-03-27 19:52:00 +00:00
terencechain
25ce2ac4dc Fix eip4844 unit tests (#12190) 2023-03-27 09:20:56 -07:00
kasey
80e9042c6b BlobsByRange RPC (#12092) 2023-03-24 13:06:55 -07:00
terencechain
6b7e8ac00e Use blob getters from DB and fix tests (#12166) 2023-03-24 09:40:47 -07:00
terencechain
ca770f12ee EIP4844: gossip fixes (#12179) 2023-03-23 09:07:31 -07:00
terence tsao
74bebd9244 Set execution when there's a blob 2023-03-22 11:34:13 -07:00
terencechain
447b42044a Fix interop bugs (#12172) 2023-03-21 09:18:45 -07:00
kasey
6ec8d23d4f add missing field to proto copy helper (#12171) 2023-03-21 08:57:00 -07:00
terence tsao
a41d80a03d Add capella fork epoch 2023-03-20 18:52:25 -07:00
terencechain
849c1dd25b Add blob db methods (#12157) 2023-03-19 16:38:37 -07:00
terence tsao
d430266b70 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-18 07:54:49 -07:00
terence tsao
6d52516638 Update v3 to v4 2023-03-17 12:55:16 -07:00
terence tsao
2ff3a82eac Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-17 12:35:54 -07:00
terence tsao
d403b68d76 Clean up old code 2023-03-17 12:10:11 -07:00
terence tsao
b0768e39c3 Use copy 2023-03-16 16:53:14 -07:00
terence tsao
cae57ee9f6 Fix spec tests 2023-03-16 13:54:47 -07:00
terence tsao
3104fd217d Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-16 13:28:00 -07:00
terencechain
d1b9954566 Use block blobs queue for sync (#12129) 2023-03-16 13:23:56 -07:00
terencechain
033b165c08 Add KZG proof and new gossip condition (#12136) 2023-03-16 12:53:15 -07:00
terencechain
78175ee0fd Update and clean up validators for devnet5 (#12131) 2023-03-15 11:54:57 -07:00
terence tsao
a34cc7f7bf Update geth 2023-03-15 10:51:52 -07:00
terence tsao
6f22cd7963 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-14 14:34:45 -07:00
terencechain
58967e4516 Rm blobs sidecar usages (#12118) 2023-03-13 11:34:20 -07:00
terence tsao
947c9fbe60 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-13 08:04:23 -07:00
terence tsao
8cc1e67e6c Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-10 08:23:09 -08:00
terencechain
2f3deac8b0 Make eip4844 green again (#12093) 2023-03-10 07:32:09 -08:00
terencechain
aa44ba40ab Add blob gossip (#12007) 2023-03-07 20:05:43 -08:00
terencechain
8b268d646c Add block and blobs cache (#12088) 2023-03-07 15:32:36 -08:00
kasey
d6ecadb471 BlobsByRoot RPC (#12011)
* boilerplate for block&block-by-root->blob-by-root

* add db interface to service and use in handler

* rm unused requestBlockAndSidecarByRoot

* add test for base case of sidecar by root

* test blob slot < min req epoch, other fixes

* cleaning up test mess

* rm unused func

* add BroadcastBlob method stubs to fix build

* handler name consistent with path

* initialize blob queue for test

* lint & gaz

* update mock to satisfy interface

* fix wrong sig for mock

* clean up min req epoch, no underflow, + tests

---------

Co-authored-by: kasey <kasey@users.noreply.github.com>
2023-03-07 09:45:01 -06:00
terence tsao
9a8bde448e Sync with develop 2023-03-06 16:16:18 -08:00
terence tsao
ce71b3b6b1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-03-06 15:56:29 -08:00
terence tsao
5fa30bf73a Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-02-23 15:45:32 -08:00
terencechain
b93aba6126 Add validator signing decoupled blobs (#12015) 2023-02-23 13:26:45 -08:00
terence tsao
51ed80df69 Fix build 2023-02-23 09:22:38 -08:00
terencechain
a614c4ac8c Add broadcast blob method (#12016) 2023-02-23 07:52:58 -08:00
terencechain
7b777a10a5 EIP4844: update excessive data gas field and pass spec tests (#12032) 2023-02-22 10:19:32 -08:00
terence tsao
bf1ab9951f Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-02-22 06:59:31 -08:00
terencechain
5cea2b3855 Revert block gossip changes to before coupling (#12012) 2023-02-21 17:06:11 -08:00
terencechain
ab63757fe5 Add decoupled blob protobufs (#12002) 2023-02-16 11:10:55 -08:00
terencechain
736ed1e003 Fix spec tests after renamed EIP4844 to Deneb (#12005) 2023-02-16 11:10:37 -08:00
terence tsao
6ee9707fd7 Sync with develop 2023-02-16 08:49:00 -08:00
terence tsao
e40835b1a9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-02-16 08:20:20 -08:00
terence tsao
216a420bbc Sync with master 2023-01-28 14:19:50 +01:00
terence tsao
4845abecb8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-01-28 13:09:08 +01:00
James He
08a910e44f renaming to proto endpoint 2023-01-27 16:30:24 +01:00
terence tsao
67ba2c4fe3 Add slash 2023-01-27 15:36:10 +01:00
terence tsao
7efa501bdc Fix space 2023-01-27 15:35:57 +01:00
terence tsao
879a694ab3 Another nil check 2023-01-27 10:15:04 +01:00
terence tsao
4e3b1881ed Add check for nil block 2023-01-27 09:44:15 +01:00
Inphi
35d3707de7 sync: Fix BlobsSidecarByRange rate limiting (#11920) 2023-01-26 07:48:39 +01:00
terence tsao
1f468bd3f5 Rm field params 2023-01-25 10:49:06 +01:00
terence tsao
ac32098c86 Merge branch 'init-nil-withdrawals' into eip4844 2023-01-25 10:32:11 +01:00
terence tsao
e0af005c42 Initialize nil withdrawals at marshal / unmarshal 2023-01-25 10:29:44 +01:00
Francis Li
f08af1bdbf Fix eip4844 branch errors (#11901) 2023-01-24 11:52:05 +01:00
terence tsao
6d0420fde5 Merge branch 'eip4844' of github.com:prysmaticlabs/prysm into eip4844 2023-01-24 11:41:40 +01:00
terence tsao
5172e6e362 Rate limiter 2023-01-24 11:41:08 +01:00
Inphi
42df0f70b6 sync: Re-broadcast block and blobs sidecar (#11907) 2023-01-24 11:15:57 +01:00
terence tsao
dace0f6a2d Rename eip4844 to deneb 2023-01-19 17:30:36 -08:00
terence tsao
26a5878181 Write bad block and blob to disk 2023-01-18 14:02:09 -08:00
terence tsao
db6474a3e4 Sync with develop 2023-01-18 12:00:48 -08:00
terence tsao
b84851fd0d Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2023-01-18 08:38:13 -08:00
terence tsao
673702c100 Fix loop referencing for kzgs 2023-01-12 15:50:46 -08:00
terence tsao
520eb6baca Rm unused checks 2023-01-12 10:36:21 -08:00
terence tsao
e6047dc344 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2023-01-11 14:59:00 -08:00
terence tsao
d86a452b15 Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-11 14:52:26 -08:00
terence tsao
67f9d0b9c4 Fix log 2023-01-11 08:08:33 -08:00
terence tsao
21cd055b84 Better logs 2023-01-11 07:40:19 -08:00
Kasey Kirkham
9f3bb623ec add capella to yaml "template" 2023-01-10 16:30:32 -06:00
Kasey Kirkham
b10a95097e capella state version detection bug fix 2023-01-10 15:54:27 -06:00
terence tsao
4561f5cacb Clean ups 2023-01-09 14:33:05 -08:00
terence tsao
50b672a4db Beacon api: get blobs 2023-01-09 11:40:06 -08:00
Potuz
ffbb73a59b Merge remote-tracking branch 'origin/develop' into capella 2023-01-09 12:07:00 -03:00
Potuz
649974f14d removed duplicated case 2023-01-09 09:54:15 -03:00
Potuz
9ec0bc0734 Merge remote-tracking branch 'origin/historical-summaries' into capella 2023-01-09 08:57:24 -03:00
terence tsao
9649e49658 Passing spec tests 2023-01-07 08:50:20 -08:00
terence tsao
49fdcb7347 Add spec tests 2023-01-07 08:33:48 -08:00
terence tsao
cd6ee956ed Merge branch 'historical-summaries' into eip4844 2023-01-07 08:03:15 -08:00
terence tsao
ef95fd33f8 Uncomment withdrawal stubs 2023-01-07 07:52:31 -08:00
terence tsao
1a488241b0 Sync with capella 2023-01-07 07:48:57 -08:00
terence tsao
5fdd3a3d66 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2023-01-07 07:19:55 -08:00
terence tsao
b6a32c050f Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-07 07:03:58 -08:00
terence tsao
055e225093 Passing spec tests 2023-01-06 15:18:38 -08:00
terence tsao
144218cb1b Process historical roots test 2023-01-06 11:56:24 -08:00
Potuz
13b575a609 Merge remote-tracking branch 'origin/develop' into capella 2023-01-06 10:59:43 -03:00
terence tsao
b5a414eae9 Rm bad imports 2023-01-04 08:10:27 -08:00
terence tsao
b94b347ace Sync with develop 2023-01-04 07:37:25 -08:00
terence tsao
f5ee225819 Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2023-01-04 07:16:00 -08:00
terence tsao
9cb48be14f Add historical summaries to state and processing 2023-01-03 15:58:39 -08:00
terence tsao
85fa9951eb Fix span name 2022-12-24 23:40:30 +08:00
terence tsao
ec72575fc9 Close stream for by root rpc handler 2022-12-24 23:34:02 +08:00
terence tsao
d9d1bb6d3d Skip on empty 2022-12-24 11:37:58 +08:00
Potuz
ffcdc26618 Merge remote-tracking branch 'origin/develop' into capella 2022-12-23 13:03:11 -03:00
Potuz
96981a07b9 check signatures of BLS changes before capella 2022-12-22 16:10:00 -03:00
Potuz
6b2721b239 Check BLS_TO_EXECUTION_CHANGE as if they were from Capella
In the event we receive a BLS_TO_EXECUTION_CHANGE and our head state is
before Capella, verify its signature with the Capella fork version.
2022-12-22 11:34:57 -03:00
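
A minimal Go sketch of the verification rule stated in the commit message above, assuming a simplified fork-version model; signingForkForBLSChange and the forkVersion type are hypothetical illustrations, not Prysm's signing-domain helpers.

```go
package main

import "fmt"

// Hypothetical, simplified helpers for illustration; not Prysm's real
// signing-domain or fork-schedule API.
type forkVersion [4]byte

var (
	bellatrixFork = forkVersion{0x02, 0x00, 0x00, 0x00}
	capellaFork   = forkVersion{0x03, 0x00, 0x00, 0x00}
)

// signingForkForBLSChange returns the fork version used to verify a
// BLS_TO_EXECUTION_CHANGE signature. Per the commit message above, even when
// the head state is still pre-Capella, the message is verified as if it were
// a Capella message, so the Capella fork version is always used.
func signingForkForBLSChange(headFork forkVersion) forkVersion {
	_ = headFork // intentionally ignored: the head's fork does not matter here
	return capellaFork
}

func main() {
	fmt.Println(signingForkForBLSChange(bellatrixFork) == capellaFork) // true
}
```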
terence tsao
c79151a574 Fix rate limiter 2022-12-22 22:10:34 +08:00
Inphi
4b20234801 Fix missing context in post-altair /v1 messages (#11807) 2022-12-22 06:46:15 +08:00
terence tsao
911048aa6d Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-21 13:01:40 +08:00
James He
255e9693ee fixing typo on comments 2022-12-20 22:42:31 -06:00
terence tsao
61c1216e3d Better handling for validate sync msg time 2022-12-21 10:08:44 +08:00
terence tsao
17e1eaf0f3 Set stream write deadline 2022-12-21 09:38:00 +08:00
terence tsao
9940943595 Clean up sync 2022-12-21 09:30:11 +08:00
terence tsao
9a0f941870 Change avgSidecarBlobsTransferBytes 2022-12-21 08:32:47 +08:00
terence tsao
5d0f54d332 Blobs rate limiter 2022-12-21 08:17:54 +08:00
rkapka
d602c94b7b fix 2022-12-20 20:28:53 +01:00
rkapka
6a5ecbd68f Implement getPoolBLSToExecutionChanges API endpoint
(cherry picked from commit cd25d922bc)

# Conflicts:
#	beacon-chain/node/node.go
#	beacon-chain/rpc/eth/beacon/pool.go
#	proto/eth/service/beacon_chain_service.pb.go
#	proto/migration/v1alpha1_to_v2.go
2022-12-20 17:31:21 +01:00
Potuz
29dfcab505 Fix BlockValue marshalling 2022-12-19 11:14:35 -03:00
Potuz
16e5c903cc Merge remote-tracking branch 'origin/develop' into capella 2022-12-19 10:28:05 -03:00
terencechain
66682cb4e5 Clean up block initialization and remove set block (#11791) 2022-12-19 15:41:28 +08:00
terencechain
52faea8b7d Support 4844 container type queries in the beacon API + update spec tests (#11794) 2022-12-19 15:38:23 +08:00
terence tsao
8a78315682 Save blobs during by range sync 2022-12-18 07:44:30 +08:00
Potuz
cab42a4ae3 Take raw arrays for BLS changes 2022-12-16 18:02:35 -03:00
Potuz
a5bdd42bdd Merge remote-tracking branch 'origin/block-value' into capella 2022-12-16 12:47:29 -03:00
Potuz
a26197f919 take lists for bls changes endpoint 2022-12-16 12:47:18 -03:00
terence tsao
8b9cab457e Got block and blobs gossip working 2022-12-16 13:41:59 +08:00
terence tsao
080ce31395 Add block value to get payload v2 2022-12-15 17:17:58 +08:00
terence tsao
7866e8a196 Got blob syncing to work 2022-12-15 17:02:38 +08:00
Potuz
d5d17e00b3 Merge branch 'develop' into capella 2022-12-14 13:04:23 -03:00
rkapka
9c6a1331cf remove redeclared struct 2022-12-14 16:29:14 +01:00
terence tsao
d89c97634c Merge branch 'eip4844' of github.com:prysmaticlabs/prysm into eip4844 2022-12-14 10:26:47 +08:00
terence tsao
7e95ca3705 Add blobs to initial syncing path 2022-12-14 10:26:39 +08:00
Inphi
abd46b01b7 Fix non-empty kzg commitment in proposal (#11766) 2022-12-14 08:06:14 +08:00
Potuz
8629ac8417 only broadcast bls changes post-capella 2022-12-13 09:53:05 -03:00
terence tsao
304925aabf Add todos for 4844 sync 2022-12-13 16:18:09 +08:00
terence tsao
16d93e47a5 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-13 15:12:46 +08:00
rkapka
6dcb2bbf0d Use signed changes in middleware block
(cherry picked from commit e3c9e7bb5c)
2022-12-12 16:21:39 +01:00
Potuz
deb138959a fix validator client 2022-12-12 11:43:48 -03:00
Potuz
45e6f3bd00 fix build 2022-12-12 11:29:57 -03:00
Potuz
55a9e0d51a Merge branch 'develop' into capella 2022-12-12 11:15:39 -03:00
terence tsao
3ddae600fb Merge branch 'capella' of github.com:prysmaticlabs/prysm into capella 2022-12-09 19:35:39 -08:00
terence tsao
861ede8945 Fix subscriptions 2022-12-09 15:29:23 -08:00
terence tsao
93f11f9047 Change target / max blobs to 2 / 4 2022-12-09 10:40:20 -08:00
rkapka
56503110dd Merge branch 'recontruct-capella-blinded' into capella
# Conflicts:
#	testing/spectest/shared/common/forkchoice/service.go
2022-12-09 12:45:23 +01:00
rkapka
f67d35dffd single execution block type 2022-12-09 12:43:35 +01:00
terence tsao
efbca1b5b7 Add v3 engine apis 2022-12-08 16:45:05 -08:00
terence tsao
2de0ebaf8d Merge branch 'roberto-fix-auth' into eip4844 2022-12-08 13:52:11 -08:00
Roberto Bayardo
0815ef94a3 Merge branch 'develop' into roberto-fix-auth 2022-12-08 13:23:29 -08:00
Roberto Bayardo
092ffa99e5 update & fix code around setting auth header for latest geth 2022-12-08 13:14:09 -08:00
rkapka
b05b67b264 reorder checks 2022-12-08 19:48:02 +01:00
rkapka
a5c6518c20 deepsource 2022-12-08 19:48:02 +01:00
Radosław Kapka
da048395ce Merge branch 'develop' into recontruct-capella-blinded 2022-12-08 18:41:12 +01:00
rkapka
f31f7be310 fix engine mock 2022-12-08 18:39:59 +01:00
rkapka
e1a2267f86 Merge remote-tracking branch 'origin/capella' into capella 2022-12-08 18:36:44 +01:00
rkapka
3c9e4ee7f7 Merge branch 'recontruct-capella-blinded' into capella
# Conflicts:
#	beacon-chain/blockchain/pow_block.go
#	beacon-chain/execution/engine_client.go
#	beacon-chain/execution/engine_client_test.go
#	beacon-chain/execution/testing/mock_engine_client.go
#	beacon-chain/rpc/eth/beacon/blocks.go
#	beacon-chain/state/state-native/getters_withdrawal.go
#	consensus-types/blocks/factory.go
#	proto/engine/v1/json_marshal_unmarshal.go
#	proto/engine/v1/json_marshal_unmarshal_test.go
2022-12-08 18:31:25 +01:00
rkapka
9ba32c9acd single ExecutionBlockByHash function 2022-12-08 17:53:02 +01:00
rkapka
d23008452e fix failing tests 2022-12-08 17:29:51 +01:00
terencechain
f397cba1e0 Better proposal RPC (#11721) 2022-12-08 07:40:48 -08:00
terence tsao
3eecbb5b1a Fix enum for cli 2022-12-07 12:12:56 -08:00
rkapka
1583e93b48 bzl 2022-12-07 18:23:43 +01:00
rkapka
849457df81 deepsource
(cherry picked from commit 903cab75ee)

# Conflicts:
#	beacon-chain/execution/testing/mock_engine_client.go
2022-12-07 16:29:34 +01:00
rkapka
903cab75ee deepsource 2022-12-07 16:27:09 +01:00
rkapka
ee108d4aff add doc to interface
(cherry picked from commit a08baf4a14)
2022-12-07 16:22:14 +01:00
rkapka
49bcc58762 rename methods
(cherry picked from commit 8c56dfdd46)
2022-12-07 16:22:09 +01:00
rkapka
a08baf4a14 add doc to interface 2022-12-07 16:20:43 +01:00
rkapka
8c56dfdd46 rename methods 2022-12-07 16:20:31 +01:00
rkapka
dcdd9af9db remove unneeded test 2022-12-07 16:05:44 +01:00
rkapka
a464cf5c60 Merge branch 'reconstruct-capella-block' into capella
(cherry picked from commit b0601580ef)

# Conflicts:
#	beacon-chain/rpc/eth/beacon/blocks.go
#	proto/engine/v1/json_marshal_unmarshal.go
2022-12-07 15:21:58 +01:00
terence tsao
cc55c754dc Fix cli flag 2022-12-06 16:57:50 -08:00
terence tsao
2d483ab09f Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-06 16:48:44 -08:00
terence tsao
d64e10a337 Interop 2022-12-06 16:06:18 -08:00
terence tsao
1e9ee10674 Merge branch 'better-validator-rpcs' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 15:14:20 -08:00
terence tsao
3ac395b39e Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:42:49 -08:00
Justin Traglia
6e26a6f128 Replace LastWithdrawalValidatorIndex to updated name (#11725) 2022-12-06 14:40:10 -08:00
Justin Traglia
b512b92a8a Update withdrawal error message to reflect new field name (#11724) 2022-12-06 14:39:17 -08:00
terence tsao
5ff601a1b9 Sync with latest go-ethereum changes 2022-12-06 14:38:14 -08:00
terence tsao
5823054519 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:08:00 -08:00
terence tsao
3d196662bc Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-06 14:06:30 -08:00
rkapka
b0601580ef Merge branch 'reconstruct-capella-block' into capella 2022-12-06 22:16:59 +01:00
rkapka
c1f29ea651 remove logs 2022-12-06 22:11:35 +01:00
rkapka
881d1d435a logs 2022-12-06 21:46:41 +01:00
rkapka
d1aae0c941 Merge branch 'capella' into reconstruct-capella-block 2022-12-06 21:26:55 +01:00
terence tsao
468cc23876 Fix interop 2022-12-04 08:40:41 -08:00
terence tsao
d9646a9183 Add builder paths 2022-12-03 07:29:46 -08:00
terence tsao
279cee42f1 Refactor block proposal path 2022-12-02 16:13:01 -08:00
terence tsao
57bdb907cc Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-12-02 11:10:39 -08:00
rkapka
15d683c78f Merge branch 'capella' into reconstruct-capella-block 2022-12-02 16:43:56 +01:00
rkapka
bf6c8ced7d working 2022-12-02 16:37:24 +01:00
Potuz
78fb685027 Check BLS changes when requesting from the pool 2022-12-02 10:14:39 -03:00
terence tsao
a87536eba0 Fix minimal spec test 2022-12-01 15:26:18 -08:00
terence tsao
3f05395a00 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-12-01 14:52:49 -08:00
terence tsao
85fc57d41e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-12-01 14:45:31 -08:00
terence tsao
1e5976d5ce Mainnet spec tests passing 2022-12-01 14:41:34 -08:00
Potuz
98c0b23350 broadcast BLS changes 2022-12-01 11:26:56 -03:00
terence tsao
039a0fffba Merge branch 'develop' of github.com:prysmaticlabs/prysm into capella 2022-11-30 12:09:47 -08:00
Potuz
90ec640e7a Fix capella operations spectests 2022-11-30 15:35:55 -03:00
Potuz
10acd31d25 check on verify instead of sig 2022-11-30 15:20:23 -03:00
terence tsao
4224014fad Add 4844 block and state 2022-11-30 10:14:35 -08:00
Potuz
df1e8b33d8 BLS Change signature verification 2022-11-30 14:52:31 -03:00
rkapka
cdb4ee42cc not working 2022-11-30 18:22:52 +01:00
rkapka
d29baec77e proper error handling in BuildSignedBeaconBlockFromExecutionPayload 2022-11-30 15:32:57 +01:00
terence tsao
53c189da9b Merge branch 'update-eip4844-objs' into eip4844 2022-11-29 21:16:31 -08:00
terence tsao
277fbce61b Update eip4844 objects 2022-11-29 21:15:14 -08:00
terence tsao
0adc54b7ff Refactor get payload 2022-11-29 12:02:47 -08:00
Potuz
1cbd7e9888 withdraw by default 2022-11-29 13:52:34 -03:00
rkapka
0a9e1658dd move stuff to blinded_blocks.go 2022-11-29 17:36:52 +01:00
rkapka
31d4a4cd11 test other functions 2022-11-29 17:36:52 +01:00
rkapka
fbc4e73d31 refactor and test GetSSZBlockV2 2022-11-29 17:36:52 +01:00
rkapka
c1d4eaa79d refactor and test GetBlockV2 2022-11-29 17:36:52 +01:00
rkapka
760af6428e update ssz 2022-11-29 17:36:52 +01:00
terence tsao
dfa0ccf626 Fix attribute pb nil checks 2022-11-29 07:27:33 -08:00
terence tsao
7a142cf324 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-29 07:09:41 -08:00
rkapka
1a51fdbd58 update withdrawals proto 2022-11-29 14:23:11 +01:00
terencechain
368a99ec8d Fix nil attribute for capella branch (#11701) 2022-11-28 17:34:26 -08:00
terence tsao
1c7e734918 Fix some blockchain tests 2022-11-28 13:59:17 -08:00
rkapka
764d1325bf Merge remote-tracking branch 'origin/capella' into capella 2022-11-28 21:07:00 +01:00
rkapka
0cf30e9022 Merge branch '__develop' into capella 2022-11-28 21:06:20 +01:00
Potuz
227b20f368 fix nil block from stream 2022-11-28 16:53:11 -03:00
rkapka
d7d70bc25b support SSZ lol
(cherry picked from commit 52bc2c8d617ac3e1254c493fa053cdce4a1ebd63)
2022-11-28 20:19:24 +01:00
rkapka
82f6ddb693 add Capella version
(cherry picked from commit 5d6fd0bbe663e5dd16df5b2e773f68982bbcd24e)
2022-11-28 20:19:19 +01:00
rkapka
9e4e82d2c5 refactor GetBlindedBlockSSZ
(cherry picked from commit 97483c339f99b0d96bd81846a979383ffd2b0cda)

# Conflicts:
#	beacon-chain/rpc/eth/beacon/blocks.go
2022-11-28 20:19:15 +01:00
rkapka
9838369fe9 fix proto generation 2022-11-28 20:15:05 +01:00
rkapka
6085ad1bfa fix issues 2022-11-28 20:07:37 +01:00
rkapka
d3851b27df Merge branch '__develop' into capella
# Conflicts:
#	beacon-chain/rpc/apimiddleware/structs.go
#	beacon-chain/rpc/eth/beacon/blocks.go
#	proto/eth/v2/BUILD.bazel
#	proto/eth/v2/beacon_block.pb.go
#	proto/eth/v2/beacon_block.proto
#	proto/eth/v2/generated.ssz.go
#	proto/migration/v1alpha1_to_v2.go
#	proto/prysm/v1alpha1/beacon_chain.pb.go
#	proto/prysm/v1alpha1/beacon_chain.proto
2022-11-28 19:31:33 +01:00
Potuz
d6100dfdcb fix spectest 2022-11-28 12:38:14 -03:00
Potuz
c2144dac86 Add BLSToExecutionChange endpoint 2022-11-27 23:00:16 -03:00
Potuz
a47ff569a8 Add Submit BLSChange endpoint 2022-11-27 20:50:21 -03:00
Potuz
f8be022ef2 Merge branch 'develop' into capella 2022-11-27 20:40:41 -03:00
Potuz
4f39e6b685 Implement REST block API endpoints 2022-11-27 16:20:30 -03:00
Potuz
c67b000633 add test 2022-11-27 13:07:50 -03:00
Potuz
02f7443586 Refactor Sync Committee Rewards 2022-11-27 09:04:03 -03:00
terence tsao
6275e7df4e Clean up execution engine 2022-11-25 17:20:28 -08:00
terencechain
1b6b52fda1 Add PayloadAttribute superset and use it for engine-api (#11691) 2022-11-25 17:12:55 -08:00
Potuz
5fa1fd84b9 Hook the BLSTOExecution Pool to the proposer 2022-11-25 09:33:17 -03:00
nisdas
bd0c9f9e8d fix 2022-11-25 09:06:59 -03:00
Potuz
2532bb370c Merge branch 'develop' into capella 2022-11-25 08:16:32 -03:00
nisdas
12efc6c2c1 make it reject 2022-11-24 22:21:12 +08:00
nisdas
a6cc9ac9c5 add sig validation 2022-11-24 22:19:55 +08:00
nisdas
031f5845a2 add gossip handler for bls change object 2022-11-24 21:22:18 +08:00
nisdas
b88559726c Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into capella 2022-11-24 20:05:41 +08:00
terence tsao
ca6ddf4490 Add and use send blocks and sidecars requests 2022-11-23 16:11:06 -08:00
terence tsao
3ebb2fce94 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-23 14:21:45 -08:00
nisdas
62f6b07cba fix gossip registration 2022-11-23 20:10:44 +08:00
terence tsao
f956f1ed6e Handle capella version for packing atts 2022-11-22 17:21:21 -08:00
terence tsao
1c0fa95053 Fix forkchoice test 2022-11-22 17:09:34 -08:00
terence tsao
04bf4a1060 Clean up 2022-11-22 15:38:24 -08:00
terence tsao
ae276fd371 Add mainnet spec tests 2022-11-22 10:53:47 -08:00
terence tsao
104bdaed12 Merge branch 'capella' of github.com:prysmaticlabs/prysm into eip4844 2022-11-22 09:43:43 -08:00
terence tsao
089a5d6ac2 Migrate from geth's kzg lib to go-kzg/eth (Thanks @roberto-bayardo) 2022-11-22 08:51:45 -08:00
Potuz
16b0820193 Merge branch 'develop' into capella 2022-11-22 13:43:03 -03:00
Potuz
4b02267e96 add more minimal fixes 2022-11-22 13:34:33 -03:00
Potuz
746584c453 fix missing minimal test 2022-11-22 13:34:33 -03:00
Potuz
b56daaaca2 Fix empty withdrawals slice 2022-11-22 10:38:42 -03:00
terence tsao
b7a6fe88ee Update block and sidecar gossip conditions 2022-11-21 14:38:44 -08:00
terence tsao
22d1c37b92 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-20 19:22:30 -08:00
terence tsao
78a393f825 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-18 13:48:40 -08:00
terence tsao
ac8290c1bf Port over shared kzg functions and updated trusted setup 2022-11-18 10:55:35 -08:00
terence tsao
5d0662b415 Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-18 10:00:32 -08:00
terence tsao
931e5e10c3 Fix mainnet fork transition tests 2022-11-16 08:49:06 -05:00
Potuz
c172f838b1 Mark capella fields as dirty 2022-11-15 09:58:34 -05:00
Potuz
c07ae29cd9 move MaxWithdrawalsPerPayload to fieldparams 2022-11-14 22:59:31 -05:00
Potuz
214c9bfd8b fix bls_to_execution_changes 2022-11-14 16:41:17 -05:00
Potuz
716140d64d add bls_to_execution_change tests 2022-11-14 16:26:39 -05:00
Potuz
088cb4ef59 fix expected_withdrawals 2022-11-14 14:50:41 -05:00
terence tsao
fa33e93a8e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-14 08:53:15 -05:00
Potuz
d1472fc351 Add withdrawals operations tests 2022-11-13 08:54:59 -03:00
terence tsao
5c8c0c31d8 Merge branch 'capella-withdrawal-minimal' into capella 2022-11-12 22:06:43 -08:00
terence tsao
7f3c00c7a2 Can build 2022-11-12 22:06:22 -08:00
terencechain
c180dab791 Merge branch 'develop' into capella 2022-11-12 18:28:18 -08:00
terence tsao
f24acc21c7 Fix bazel 2022-11-12 17:43:19 -08:00
terence tsao
40b637849d Fix minimal capella tests 2022-11-12 17:26:05 -08:00
terence tsao
e7db1685df Add mainnet capella tests 2022-11-12 17:13:26 -08:00
terence tsao
eccbfd1011 Add shared capella spec tests helpers 2022-11-12 17:13:16 -08:00
terence tsao
90211f6769 Fix prev epoch attested precompute 2022-11-12 17:12:29 -08:00
terence tsao
edc32ac18e Fix slashing quotient 2022-11-12 17:12:04 -08:00
terence tsao
fe68e020e3 Add selector with minimal withdrawal size 2022-11-12 16:18:16 -08:00
terence tsao
81e1e3544d Add mainnet ssz vectors 2022-11-12 15:50:07 -08:00
Potuz
09372d5c35 Revert "added mainnet ssz tests"
This reverts commit 078a89e4ca.
2022-11-12 18:40:28 -03:00
Potuz
078a89e4ca added mainnet ssz tests 2022-11-12 18:39:11 -03:00
Potuz
dbc6ae26a6 Add minimal support for capella spec tests
Fixed many issues about hashing
Added fork typing in the state replayer
2022-11-12 18:18:35 -03:00
Potuz
b6f429867a Merge branch 'develop' into capella 2022-11-12 16:35:20 -03:00
Potuz
09f50660ce Merge branch 'develop' into capella 2022-11-12 11:51:06 -03:00
Potuz
189825b495 fix withdrawal hashing 2022-11-11 23:21:41 -03:00
terence tsao
441cad58d4 Merge commit 'e03de47db7b782bdf7dc8d9b42749eb2a236cdea' into eip4844 2022-11-11 09:18:57 -08:00
terence tsao
1277d08f9e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-11 08:46:39 -08:00
terence tsao
e03de47db7 Oops, wrong branch 2022-11-11 08:45:22 -08:00
Potuz
764b7ff610 Don't build capella payload twice 2022-11-11 10:47:29 -03:00
terence tsao
307be7694e Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-09 16:44:34 -08:00
terence tsao
c76ae1ef39 Add beacon_block_and_blobs_sidecar gossip 2022-11-09 16:43:26 -08:00
Potuz
d499db7f0e Merge branch 'develop' into capella 2022-11-09 21:27:18 -03:00
terence tsao
a894b9f29a Merge branch 'develop' of github.com:prysmaticlabs/prysm into eip4844 2022-11-09 14:49:18 -08:00
terence tsao
902e6b3905 Update to use latest kzg library 2022-11-09 10:17:27 -08:00
Potuz
ed2d1c7bf9 Merge branch 'develop' into capella 2022-11-09 09:14:55 -03:00
nisdas
14b73cbd47 register flag 2022-11-09 08:48:36 +08:00
terence tsao
a39c7aa864 Add rpc blobs sidecars by range 2022-11-07 14:53:12 -08:00
terence tsao
170bc9c8ec Network changes 2022-11-07 14:43:46 -08:00
terence tsao
365c01fc29 Add tests 2022-11-07 06:58:24 -08:00
Potuz
3124785a08 Merge branch 'develop' into capella 2022-11-07 10:08:09 -03:00
Potuz
60e6306107 working withdrawals initial commits 2022-11-06 16:53:57 -03:00
terence tsao
42ccb7830a Add Capella DB changes 2022-11-06 15:25:34 -03:00
Potuz
0bb03b9292 fix marshalling and engine calls 2022-11-06 15:24:22 -03:00
nisdas
ed6fbf1480 stupid bug 2022-11-07 00:18:01 +08:00
nisdas
477cec6021 wei it 2022-11-07 00:13:12 +08:00
nisdas
924500d111 add unmarshal 2022-11-06 23:57:37 +08:00
Potuz
0677504ef1 Revert "proposer changes"
This reverts commit ca2a7c4d9c.
2022-11-06 12:16:53 -03:00
Potuz
ca2a7c4d9c proposer changes 2022-11-06 12:14:17 -03:00
Potuz
28606629ad marshalling stub 2022-11-06 12:03:25 -03:00
Potuz
c817279464 fix capella payload 2022-11-06 11:14:39 -03:00
Potuz
009d6ed8ed proposer logic 2022-11-06 10:49:32 -03:00
Potuz
5cec1282a9 FCU two versions 2022-11-06 10:22:45 -03:00
Potuz
340170fd29 propose block V2 2022-11-06 09:42:20 -03:00
Potuz
7ed0cc139a marshalling first attempt 2022-11-06 07:47:43 -03:00
Potuz
2c822213eb rpc changes 2022-11-06 00:24:56 -03:00
terence tsao
0894b9591c Add Capella DB changes 2022-11-05 13:27:18 -07:00
terence tsao
f0ca45f9a2 Json marshal and unmarshal blob bundle 2022-11-05 12:45:14 -07:00
terence tsao
afc48c6485 State change and config changes for Capella 2022-11-05 12:45:01 -07:00
terence tsao
93dce8a0cb P2p changes for Capella fork 2022-11-05 12:44:46 -07:00
terence tsao
149ccdaf39 Add engine call for get payload 2022-11-05 11:00:13 -07:00
Potuz
c08bb39ffe add fork versions 2022-11-05 13:38:11 -03:00
Potuz
5083d8ab34 propose capella blocks 2022-11-05 12:38:27 -03:00
Potuz
7552a5dd07 capella fork logic 2022-11-05 07:33:20 -03:00
Potuz
c93d68f853 Capella state transition 2022-11-05 06:06:15 -03:00
terence tsao
2b74db2dce Add blob database methods 2022-11-04 13:46:01 -07:00
terence tsao
cc6c91415d Add validate blobs sidecar 2022-11-04 13:06:15 -07:00
terence tsao
6d7d7e0adc Add excessive blobs to execution payload 2022-11-04 09:48:39 -07:00
terence tsao
2105d777f0 Add blobs kzg to Capella beacon block 2022-11-04 09:33:01 -07:00
terence tsao
14338afbdb Update go.mod 2022-11-04 08:52:16 -07:00
Potuz
3e8aa4023d Fix config test and export method 2022-11-04 11:59:49 -03:00
Potuz
b443875e66 Implement get_expected_withdrawals 2022-11-04 11:45:45 -03:00
837 changed files with 35751 additions and 25609 deletions

View File

@@ -43,12 +43,4 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh

View File

@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.20
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.20
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.52.2
version: v1.50.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: '1.20'
go-version: 1.19
id: go
- name: Check out code into the Go module directory

View File

@@ -17,15 +17,11 @@ linters:
- errcheck
- gosimple
- gocognit
- dupword
- nilerr
- whitespace
- misspell
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
min-complexity: 69
output:
print-issued-lines: true

View File

@@ -1,6 +1,6 @@
# Contribution Guidelines
Note: The latest and most up-to-date documentation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Note: The latest and most up to date documenation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?
@@ -10,9 +10,9 @@ You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues)
**1. Set up Prysm following the instructions in README.md.**
**2. Fork the Prysm repo.**
**2. Fork the prysm repo.**
Sign in to your GitHub account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
Sign in to your Github account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
**3. Create a local clone of Prysm.**
@@ -23,7 +23,7 @@ $ git clone https://github.com/prysmaticlabs/prysm.git
$ cd $GOPATH/src/github.com/prysmaticlabs/prysm
```
**4. Link your local clone to the fork on your GitHub repo.**
**4. Link your local clone to the fork on your Github repo.**
```
$ git remote add myprysmrepo https://github.com/<your_github_user_name>/prysm.git
@@ -68,7 +68,7 @@ $ go test <file_you_are_working_on>
$ git add --all
```
This command stages all the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
This command stages all of the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
**11. Commit the file or files.**
@@ -96,7 +96,8 @@ If there are conflicts between your edits and those made by others since you sta
$ git status
```
Open those files one at a time, and you will see lines inserted by Git that identify the conflicts:
Open those files one at a time and you
will see lines inserted by Git that identify the conflicts:
```
<<<<<< HEAD
@@ -118,7 +119,7 @@ $ git push myrepo feature-in-progress-branch
**15. Check to be sure your fork of the Prysm repo contains your feature branch with the latest edits.**
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
Navigate to your fork of the repo on Github. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
**16. Create a pull request.**
@@ -150,7 +151,7 @@ pick hash fix a bug
pick hash add a feature
```
Replace the word pick with the word “squash” for every line but the first, so you end with ….
Replace the word pick with the word “squash” for every line but the first so you end with ….
```
pick hash do some work
@@ -177,7 +178,7 @@ We consider two types of contributions to our repo and categorize them as follow
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:
- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement
- Opening up github issues to express interest in code to implement
- Opening up PRs referencing any open issue in the repo. PRs should include:
- Detailed context of what would be required for merge
- Tests that are consistent with how other tests are written in our implementation
@@ -187,12 +188,12 @@ Anyone can become a part-time contributor and help out on implementing Ethereum
### Core Contributors
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all the responsibilities of part-time contributors plus the majority of the following:
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
- Stay up to date on the latest beacon chain specification
- Monitor GitHub issues and PRs to make sure owner, labels, descriptions are correct
- Monitor github issues and PRs to make sure owner, labels, descriptions are correct
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
- Participate in code review, ensure code quality is excellent, and ensure high code coverage
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage
- Help with social media presence, write bi-weekly development update
- Represent Prysmatic Labs at events to help spread the word on scalability research and solutions

View File

@@ -4,14 +4,14 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
### Staking on Mainnet

View File

@@ -86,10 +86,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
],
)
@@ -164,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.3",
go_version = "1.19.7",
nogo = "@//:nogo",
)
@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-alpha.1"
consensus_spec_version = "v1.3.0-rc.5"
bls_test_version = "v0.1.1"
@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
sha256 = "266006512e71e62396e8f31be01639560c9d59a93c38220fd8f51fabefc8f5f3",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
sha256 = "2ebf483830165909cb7961562fd369dedf079997a4832cc215a543898a73aa46",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
sha256 = "333718ba5c907e0a99580caa8d28dd710543b3b271e4251581006d0e101fbce9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
sha256 = "78b6925b5a4208e32385fa4387d2c27b381a8ddd18d66d5a7787e7846b86bfc8",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -1,20 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,7 +39,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -6,12 +6,10 @@ import (
"path"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -20,8 +18,6 @@ import (
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
@@ -78,40 +74,37 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err := b.Block().HashTreeRoot()
realBlockRoot, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,
@@ -133,7 +126,7 @@ type WeakSubjectivityData struct {
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
@@ -147,7 +140,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, base.ErrNotOK) {
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
@@ -66,7 +66,11 @@ func TestMarshalToEnvelope(t *testing.T) {
}
func TestFallbackVersionCheck(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
@@ -84,13 +88,12 @@ func TestFallbackVersionCheck(t *testing.T) {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
@@ -167,41 +170,44 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
Epoch: epoch,
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -260,39 +266,42 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -306,16 +315,21 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
@@ -434,24 +448,29 @@ func TestDownloadFinalizedData(t *testing.T) {
ms, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)

View File

@@ -5,6 +5,8 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -12,8 +14,8 @@ import (
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -52,6 +54,8 @@ const (
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
@@ -81,22 +85,96 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
*client.Client
hc *http.Client
baseURL *url.URL
}
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
return &Client{c}, nil
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
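A minimal sketch of constructing a client from either accepted host form, assuming the package import path github.com/prysmaticlabs/prysm/v4/api/client/beacon; only NewClient, WithTimeout, and NodeURL from above are used:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client/beacon" // assumed import path
)

func main() {
	// A bare host:port is accepted and defaults to the http scheme.
	c1, err := beacon.NewClient("localhost:3500", beacon.WithTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c1.NodeURL()) // http://localhost:3500

	// A full URL is parsed as-is and keeps its scheme.
	c2, err := beacon.NewClient("https://beacon.example.com:3500")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c2.NodeURL()) // https://beacon.example.com:3500
}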
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
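The helper above is unexported; callers reach it through the exported getters, which pass withSSZEncoding for state and block requests. A brief usage sketch under the same assumed import path:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client/beacon" // assumed import path
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	c, err := beacon.NewClient("localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	// GetState and GetBlock set Accept: application/octet-stream,
	// so the returned bytes are ssz-encoded.
	stateBytes, err := c.GetState(ctx, beacon.IdFinalized)
	if err != nil {
		log.Fatal(err)
	}
	blockBytes, err := c.GetBlock(ctx, beacon.IdHead)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("state: %d bytes, block: %d bytes\n", len(stateBytes), len(blockBytes))
}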
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
@@ -106,7 +184,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
b, err := c.get(ctx, blockPath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
@@ -121,7 +199,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.Get(ctx, rootPath)
b, err := c.get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
@@ -144,7 +222,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.Get(ctx, getForkTpl(stateId))
body, err := c.get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
@@ -160,7 +238,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
body, err := c.get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
@@ -178,7 +256,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
body, err := c.get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
@@ -201,7 +279,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
@@ -213,7 +291,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.Get(ctx, getNodeVersionPath)
b, err := c.get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
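A worked illustration of the version regular expression above, run against two example strings; group 1 is the implementation, group 2 the semver, and group 3 whatever system information follows:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as versionRE above.
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)

func main() {
	for _, v := range []string{
		"Lighthouse/v0.1.5 (Linux x86_64)",
		"Prysm/v4.0.0/linux-amd64",
	} {
		groups := versionRE.FindStringSubmatch(v)
		fmt.Printf("impl=%s semver=%s info=%s\n", groups[1], groups[2], groups[3])
	}
}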
@@ -240,7 +318,7 @@ func renderGetStatePath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
b, err := c.get(ctx, statePath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
@@ -253,7 +331,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
// - finds the highest non-skipped block preceding the epoch
// - returns the hash tree root (htr) of the found block together with the value of state_root from that block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.Get(ctx, getWeakSubjectivityPath)
body, err := c.get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
@@ -284,7 +362,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK, there will be failure messages associated with the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
@@ -294,7 +372,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.Do(req)
resp, err := c.hc.Do(req)
if err != nil {
return err
}
@@ -323,7 +401,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.Get(ctx, changeBLStoExecutionPath)
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
@@ -335,6 +413,23 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
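Since non200Err wraps the sentinel errors defined in this package, callers can branch on the failure mode with errors.Is; a sketch under the same assumed import path:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client/beacon" // assumed import path
)

func main() {
	c, err := beacon.NewClient("localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	_, err = c.GetWeakSubjectivity(context.Background())
	switch {
	case err == nil:
		fmt.Println("weak subjectivity data retrieved")
	case errors.Is(err, beacon.ErrNotFound):
		// A 404 wraps ErrNotFound; the node may not support this endpoint.
		fmt.Println("endpoint not found, falling back")
	case errors.Is(err, beacon.ErrNotOK):
		// Every other non-2xx response wraps ErrNotOK.
		fmt.Println("non-2xx response:", err)
	default:
		log.Fatal(err)
	}
}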
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
@@ -388,7 +483,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)

View File

@@ -4,7 +4,6 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -18,17 +17,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -92,7 +91,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: client.ErrMalformedHostname,
err: ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -133,7 +132,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
})
}
}

View File

@@ -0,0 +1,13 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")

View File

@@ -11,12 +11,10 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",

View File

@@ -4,11 +4,9 @@ import (
"math/big"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -40,7 +38,7 @@ type signedBuilderBid struct {
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}
@@ -73,7 +71,7 @@ type signedBuilderBidCapella struct {
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}
@@ -106,7 +104,7 @@ type builderBid struct {
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}
@@ -154,7 +152,7 @@ type builderBidCapella struct {
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}
@@ -162,8 +160,8 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
v := bytesutil.ReverseByteOrder(b.p.Value)
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, big.NewInt(0).SetBytes(v))
}
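A small standalone sketch of the byte-order reversal used above, with a local helper standing in for bytesutil.ReverseByteOrder; the value bytes arrive in ssz-style (little-endian) order, while big.Int.SetBytes expects big-endian:

package main

import (
	"fmt"
	"math/big"
)

// reverse returns a copy of b with the byte order flipped,
// standing in for bytesutil.ReverseByteOrder.
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 1_000_000_000 is 0x3B9ACA00; in little-endian that is 00 CA 9A 3B.
	littleEndian := []byte{0x00, 0xca, 0x9a, 0x3b}
	v := new(big.Int).SetBytes(reverse(littleEndian))
	fmt.Println(v) // 1000000000
}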
// Version --

View File

@@ -6,11 +6,13 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -35,6 +37,7 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -154,7 +157,6 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
if err != nil {
return
}
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
@@ -243,6 +245,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -290,6 +293,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
@@ -302,9 +307,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
@@ -321,6 +323,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
@@ -333,14 +337,11 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
return blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
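The submitBlindedBlockTimeout applied above follows the standard context pattern: derive a child context per call and always cancel it. A minimal sketch of the same idea in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

const submitTimeout = 3 * time.Second // mirrors submitBlindedBlockTimeout above

func submit(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, submitTimeout)
	defer cancel() // releases the timer even when the call returns early

	select {
	case <-time.After(5 * time.Second): // stand-in for the relay round trip
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(submit(context.Background())) // context deadline exceeded
}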

View File

@@ -313,26 +313,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)

View File

@@ -14,17 +14,14 @@ import (
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// SignedValidatorRegistration a struct for signed validator registrations.
type SignedValidatorRegistration struct {
*eth.SignedValidatorRegistrationV1
}
// ValidatorRegistration a struct for validator registrations.
type ValidatorRegistration struct {
*eth.ValidatorRegistrationV1
}
// MarshalJSON returns a json representation copy of signed validator registration.
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
@@ -35,7 +32,6 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON returns a byte representation of signed validator registration from json.
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
@@ -52,7 +48,6 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
return nil
}
// MarshalJSON returns a json representation copy of validator registration.
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -67,7 +62,6 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON returns a byte representation of validator registration from json.
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
@@ -98,7 +92,6 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
var errInvalidUint256 = errors.New("invalid Uint256")
var errDecodeUint256 = errors.New("unable to decode into Uint256")
// Uint256 a wrapper representation of big.Int
type Uint256 struct {
*big.Int
}
@@ -125,7 +118,7 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
return Uint256{Int: bi}, nil
}
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
if !isValidUint256(s.Int) {
return []byte{}
@@ -133,19 +126,18 @@ func (s Uint256) SSZBytes() []byte {
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
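To make the ssz-style representation above concrete: the big-endian bytes from big.Int are reversed and then right-padded to 32 bytes. A tiny standard-library-only sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	v := big.NewInt(1_000_000_000)
	be := v.Bytes() // big-endian: 3b 9a ca 00

	// Reverse into little-endian and pad to 32 bytes, as SSZBytes does
	// via bytesutil.ReverseByteOrder and bytesutil.PadTo.
	le := make([]byte, 32)
	for i, b := range be {
		le[len(be)-1-i] = b
	}
	fmt.Printf("% x\n", le) // 00 ca 9a 3b 00 00 ... 00
}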
// UnmarshalJSON takes in a byte array and unmarshals the value in Uint256
func (s *Uint256) UnmarshalJSON(t []byte) error {
start := 0
end := len(t)
if len(t) < 2 {
return errors.Errorf("provided Uint256 json string is too short: %s", string(t))
if t[0] == '"' {
start += 1
}
if t[0] != '"' || t[end-1] != '"' {
return errors.Errorf("provided Uint256 json string is malformed: %s", string(t))
if t[end-1] == '"' {
end -= 1
}
return s.UnmarshalText(t[1 : end-1])
return s.UnmarshalText(t[start:end])
}
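In practice this method runs through encoding/json, where builder API responses carry uint256 values as quoted decimal strings; a brief sketch, assuming the builder package import path from the BUILD file above:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
)

func main() {
	var v builder.Uint256
	// The surrounding quotes are stripped and the remaining decimal text is parsed.
	if err := json.Unmarshal([]byte(`"1000000000"`), &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.String()) // 1000000000
}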
// UnmarshalText takes in a byte array and unmarshals the text in Uint256
func (s *Uint256) UnmarshalText(t []byte) error {
if s.Int == nil {
s.Int = big.NewInt(0)
@@ -161,7 +153,6 @@ func (s *Uint256) UnmarshalText(t []byte) error {
return nil
}
// MarshalJSON returns a json byte representation of Uint256.
func (s Uint256) MarshalJSON() ([]byte, error) {
t, err := s.MarshalText()
if err != nil {
@@ -172,7 +163,6 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
return t, nil
}
// MarshalText returns a text byte representation of Uint256.
func (s Uint256) MarshalText() ([]byte, error) {
if !isValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
@@ -180,27 +170,22 @@ func (s Uint256) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
// Uint64String is a custom type that allows marshalling from text to uint64 and vice versa.
type Uint64String uint64
// UnmarshalText takes a byte array and unmarshals the text in Uint64String.
func (s *Uint64String) UnmarshalText(t []byte) error {
u, err := strconv.ParseUint(string(t), 10, 64)
*s = Uint64String(u)
return err
}
// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%d", s)), nil
}
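Uint64String exists so that 64-bit numeric fields can round-trip as decimal strings in JSON, which is how the builder API encodes them; a short sketch with illustrative field values:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
)

func main() {
	type header struct {
		BlockNumber builder.Uint64String `json:"block_number"`
		GasLimit    builder.Uint64String `json:"gas_limit"`
	}
	var h header
	// encoding/json routes the quoted values through UnmarshalText.
	if err := json.Unmarshal([]byte(`{"block_number":"17034870","gas_limit":"30000000"}`), &h); err != nil {
		log.Fatal(err)
	}
	fmt.Println(uint64(h.BlockNumber), uint64(h.GasLimit)) // 17034870 30000000
}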
// VersionResponse is a JSON representation of a field in the builder API header response.
type VersionResponse struct {
Version string `json:"version"`
}
// ExecHeaderResponse is a JSON representation of the builder API header response for Bellatrix.
type ExecHeaderResponse struct {
Version string `json:"version"`
Data struct {
@@ -209,7 +194,6 @@ type ExecHeaderResponse struct {
} `json:"data"`
}
// ToProto returns a SignedBuilderBid from ExecHeaderResponse for Bellatrix.
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
@@ -221,7 +205,6 @@ func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
}, nil
}
// ToProto returns a BuilderBid Proto for Bellatrix.
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
header, err := bb.Header.ToProto()
if err != nil {
@@ -234,34 +217,31 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}, nil
}
// ToProto returns a ExecutionPayloadHeader for Bellatrix.
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
return &v1.ExecutionPayloadHeader{
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
}, nil
}
// BuilderBid is part of ExecHeaderResponse for Bellatrix.
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// ExecutionPayloadHeader is a field in BuilderBid.
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -280,7 +260,6 @@ type ExecutionPayloadHeader struct {
*v1.ExecutionPayloadHeader
}
// MarshalJSON returns the JSON bytes representation of ExecutionPayloadHeader.
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeader
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
@@ -305,7 +284,6 @@ func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON takes in a JSON byte array and sets ExecutionPayloadHeader.
func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeader
uc := &UnmarshalCaller{}
@@ -319,13 +297,11 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
return err
}
// ExecPayloadResponse is the builder API /eth/v1/builder/blinded_blocks for Bellatrix.
type ExecPayloadResponse struct {
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
}
// ExecutionPayload is a field of ExecPayloadResponse
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -343,104 +319,33 @@ type ExecutionPayload struct {
Transactions []hexutil.Bytes `json:"transactions"`
}
// ToProto returns a ExecutionPayload Proto from ExecPayloadResponse
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
return r.Data.ToProto()
}
// ToProto returns a ExecutionPayload Proto
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
txs[i] = p.Transactions[i]
}
return &v1.ExecutionPayload{
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
Transactions: txs,
}, nil
}
// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
}
return ExecutionPayload{
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
BaseFeePerGas: bFee,
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
Transactions: txs,
}, nil
}
// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
BaseFeePerGas: bFee,
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
@@ -448,7 +353,6 @@ type ExecHeaderResponseCapella struct {
} `json:"data"`
}
// ToProto returns a SignedBuilderBidCapella Proto from ExecHeaderResponseCapella.
func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
@@ -456,11 +360,10 @@ func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, e
}
return &eth.SignedBuilderBidCapella{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
Signature: ehr.Data.Signature,
}, nil
}
// ToProto returns a BuilderBidCapella Proto.
func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
header, err := bb.Header.ToProto()
if err != nil {
@@ -468,40 +371,37 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
}
return &eth.BuilderBidCapella{
Header: header,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
}
// ToProto returns a ExecutionPayloadHeaderCapella Proto
func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
return &v1.ExecutionPayloadHeaderCapella{
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
WithdrawalsRoot: h.WithdrawalsRoot,
}, nil
}
// BuilderBidCapella is field of ExecHeaderResponseCapella.
type BuilderBidCapella struct {
Header *ExecutionPayloadHeaderCapella `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// ExecutionPayloadHeaderCapella is a field in BuilderBidCapella.
type ExecutionPayloadHeaderCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -521,7 +421,6 @@ type ExecutionPayloadHeaderCapella struct {
*v1.ExecutionPayloadHeaderCapella
}
// MarshalJSON returns a JSON byte representation of ExecutionPayloadHeaderCapella.
func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderCapella
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
@@ -547,7 +446,6 @@ func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON takes a JSON byte array and sets ExecutionPayloadHeaderCapella.
func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderCapella
uc := &UnmarshalCaller{}
@@ -561,13 +459,11 @@ func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
return err
}
// ExecPayloadResponseCapella is the builder API /eth/v1/builder/blinded_blocks for Capella.
type ExecPayloadResponseCapella struct {
Version string `json:"version"`
Data ExecutionPayloadCapella `json:"data"`
}
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
type ExecutionPayloadCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -586,46 +482,43 @@ type ExecutionPayloadCapella struct {
Withdrawals []Withdrawal `json:"withdrawals"`
}
// ToProto returns a ExecutionPayloadCapella Proto.
func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
return r.Data.ToProto()
}
// ToProto returns a ExecutionPayloadCapella Proto.
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
txs[i] = p.Transactions[i]
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: bytesutil.SafeCopyBytes(w.Address),
Address: w.Address,
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadCapella{
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
// Withdrawal is a field of ExecutionPayloadCapella.
type Withdrawal struct {
Index Uint256 `json:"index"`
ValidatorIndex Uint256 `json:"validator_index"`
@@ -633,22 +526,18 @@ type Withdrawal struct {
Amount Uint256 `json:"amount"`
}
// SignedBlindedBeaconBlockBellatrix is the request object for builder API /eth/v1/builder/blinded_blocks.
type SignedBlindedBeaconBlockBellatrix struct {
*eth.SignedBlindedBeaconBlockBellatrix
}
// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBellatrix struct {
*eth.BlindedBeaconBlockBellatrix
}
// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBodyBellatrix struct {
*eth.BlindedBeaconBlockBodyBellatrix
}
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
@@ -659,7 +548,6 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -676,12 +564,10 @@ func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
})
}
// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
type ProposerSlashing struct {
*eth.ProposerSlashing
}
// MarshalJSON returns a JSON byte array representation of ProposerSlashing.
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
@@ -692,12 +578,10 @@ func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
})
}
// SignedBeaconBlockHeader is a field of ProposerSlashing.
type SignedBeaconBlockHeader struct {
*eth.SignedBeaconBlockHeader
}
// MarshalJSON returns a JSON byte array representation of SignedBeaconBlockHeader.
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message"`
@@ -708,12 +592,10 @@ func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
})
}
// BeaconBlockHeader is a field of SignedBeaconBlockHeader.
type BeaconBlockHeader struct {
*eth.BeaconBlockHeader
}
// MarshalJSON returns a JSON byte array representation of BeaconBlockHeader.
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -730,12 +612,10 @@ func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
})
}
// IndexedAttestation is a field of AttesterSlashing.
type IndexedAttestation struct {
*eth.IndexedAttestation
}
// MarshalJSON returns a JSON byte array representation of IndexedAttestation.
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
for i := range a.IndexedAttestation.AttestingIndices {
@@ -752,12 +632,10 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
})
}
// AttesterSlashing is a field of a Beacon Block Body.
type AttesterSlashing struct {
*eth.AttesterSlashing
}
// MarshalJSON returns a JSON byte array representation of AttesterSlashing.
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1"`
@@ -768,12 +646,10 @@ func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
})
}
// Checkpoint is a field of AttestationData.
type Checkpoint struct {
*eth.Checkpoint
}
// MarshalJSON returns a JSON byte array representation of Checkpoint.
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
@@ -784,12 +660,10 @@ func (c *Checkpoint) MarshalJSON() ([]byte, error) {
})
}
// AttestationData is a field of IndexedAttestation.
type AttestationData struct {
*eth.AttestationData
}
// MarshalJSON returns a JSON byte array representation of AttestationData.
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -806,12 +680,10 @@ func (a *AttestationData) MarshalJSON() ([]byte, error) {
})
}
// Attestation is a field of Beacon Block Body.
type Attestation struct {
*eth.Attestation
}
// MarshalJSON returns a JSON byte array representation of Attestation.
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
@@ -824,12 +696,10 @@ func (a *Attestation) MarshalJSON() ([]byte, error) {
})
}
// DepositData is a field of Deposit.
type DepositData struct {
*eth.Deposit_Data
}
// MarshalJSON returns a JSON byte array representation of DepositData.
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey"`
@@ -844,12 +714,10 @@ func (d *DepositData) MarshalJSON() ([]byte, error) {
})
}
// Deposit is a field of Beacon Block Body.
type Deposit struct {
*eth.Deposit
}
// MarshalJSON returns a JSON byte array representation of Deposit.
func (d *Deposit) MarshalJSON() ([]byte, error) {
proof := make([]hexutil.Bytes, len(d.Proof))
for i := range d.Proof {
@@ -864,12 +732,10 @@ func (d *Deposit) MarshalJSON() ([]byte, error) {
})
}
// SignedVoluntaryExit is a field of Beacon Block Body.
type SignedVoluntaryExit struct {
*eth.SignedVoluntaryExit
}
// MarshalJSON returns a JSON byte array representation of SignedVoluntaryExit.
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message"`
@@ -880,12 +746,10 @@ func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
})
}
// VoluntaryExit is a field in SignedVoluntaryExit
type VoluntaryExit struct {
*eth.VoluntaryExit
}
// MarshalJSON returns a JSON byte array representation of VoluntaryExit
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
@@ -896,12 +760,10 @@ func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
})
}
// SyncAggregate is a field of Beacon Block Body.
type SyncAggregate struct {
*eth.SyncAggregate
}
// MarshalJSON returns a JSON byte array representation of SyncAggregate.
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
@@ -912,12 +774,10 @@ func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
})
}
// Eth1Data is a field of Beacon Block Body.
type Eth1Data struct {
*eth.Eth1Data
}
// MarshalJSON returns a JSON byte array representation of Eth1Data.
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root"`
@@ -930,7 +790,6 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
@@ -977,12 +836,10 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
})
}
// SignedBLSToExecutionChange is a field in Beacon Block Body for capella and above.
type SignedBLSToExecutionChange struct {
*eth.SignedBLSToExecutionChange
}
// MarshalJSON returns a JSON byte array representation of SignedBLSToExecutionChange.
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BLSToExecutionChange `json:"message"`
@@ -993,12 +850,10 @@ func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
})
}
// BLSToExecutionChange is a field in SignedBLSToExecutionChange.
type BLSToExecutionChange struct {
*eth.BLSToExecutionChange
}
// MarshalJSON returns a JSON byte array representation of BLSToExecutionChange.
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
ValidatorIndex string `json:"validator_index"`
@@ -1011,22 +866,18 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
})
}
// SignedBlindedBeaconBlockCapella is part of the request object sent to builder API /eth/v1/builder/blinded_blocks for Capella.
type SignedBlindedBeaconBlockCapella struct {
*eth.SignedBlindedBeaconBlockCapella
}
// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
type BlindedBeaconBlockCapella struct {
*eth.BlindedBeaconBlockCapella
}
// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
type BlindedBeaconBlockBodyCapella struct {
*eth.BlindedBeaconBlockBodyCapella
}
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockCapella `json:"message"`
@@ -1037,7 +888,6 @@ func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -1054,7 +904,6 @@ func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
for i := range b.VoluntaryExits {
@@ -1107,7 +956,6 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
})
}
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`

View File

@@ -1156,14 +1156,6 @@ func TestUint256Unmarshal(t *testing.T) {
require.Equal(t, expected, string(m))
}
func TestUint256Unmarshal_BadData(t *testing.T) {
var bigNum Uint256
assert.ErrorContains(t, "provided Uint256 json string is too short", bigNum.UnmarshalJSON([]byte{'"'}))
assert.ErrorContains(t, "provided Uint256 json string is malformed", bigNum.UnmarshalJSON([]byte{'"', '1', '2'}))
}
func TestUint256UnmarshalNegative(t *testing.T) {
m := "-1"
var value Uint256

View File

@@ -1,97 +0,0 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for jwt authentication
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base url of the client
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do execute the request against the http client
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}

View File

@@ -1,48 +0,0 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}

View File

@@ -1,40 +0,0 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate if a host name's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}

View File

@@ -1,48 +0,0 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds header for authorization token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP's transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an oauth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}

View File

@@ -1,13 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -1,121 +0,0 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of web3signer or the local validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
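Taken together, the client above supports a short end-to-end flow: construct it with the keymanager API token, list the local plus web3signer keys, then resolve each key's fee recipient. A usage sketch, assuming the keymanager API is reachable on localhost:7500 and <api-token> stands in for a real auth token:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	ctx := context.Background()
	// The keymanager API needs a bearer token; WithAuthenticationToken stores it on
	// the client, and each Get above forwards it via client.WithAuthorizationToken.
	c, err := validator.NewClient("http://localhost:7500", client.WithAuthenticationToken("<api-token>"))
	if err != nil {
		log.Fatal(err)
	}
	// Local keystores and web3signer keys, de-duplicated into one hex list.
	keys, err := c.GetValidatorPubKeys(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Same-length list of fee recipients; entries stay empty when none is configured.
	recipients, err := c.GetFeeRecipientAddresses(ctx, keys)
	if err != nil {
		log.Fatal(err)
	}
	for i, key := range keys {
		fmt.Printf("%s -> %s\n", key, recipients[i])
	}
}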

View File

@@ -144,7 +144,6 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()

View File

@@ -32,8 +32,6 @@ func TestFeedPanics(t *testing.T) {
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed

View File

@@ -14,7 +14,7 @@ type WorkerResults struct {
// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
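The Scatter signature above is enough for a small usage sketch. Two assumptions to flag: the sketch treats the two int arguments handed to sFunc as the chunk's starting offset and its length, and it uses the provided *sync.RWMutex to guard a shared slice of partial results; neither detail is spelled out in the hunk. The snippet is written as if it lives alongside Scatter in the same package, so it needs only the standard sync import already required by the signature.

// parallelSum drives Scatter to sum a slice in parallel chunks, assuming the
// closure receives (offset, entries, mutex) for each chunk. Partial sums are
// appended to a shared slice under the write lock, so the returned
// WorkerResults can be ignored here.
func parallelSum(data []int) (int, error) {
	partials := make([]int, 0)
	_, err := Scatter(len(data), func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
		sum := 0
		for _, v := range data[offset : offset+entries] {
			sum += v
		}
		mu.Lock()
		partials = append(partials, sum)
		mu.Unlock()
		return nil, nil
	})
	if err != nil {
		return 0, err
	}
	total := 0
	for _, p := range partials {
		total += p
	}
	return total, nil
}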

View File

@@ -58,13 +58,11 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
@@ -120,7 +118,6 @@ go_test(
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -170,7 +167,6 @@ go_test(
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -86,12 +85,6 @@ type ForkFetcher interface {
TimeFetcher
}
// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
@@ -103,8 +96,6 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
@@ -334,7 +325,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
@@ -381,14 +372,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -417,10 +400,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
}
if ss == nil {
ss, err = s.recoverStateSummary(ctx, root)
if err != nil {
return true, err
}
return true, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
@@ -446,10 +426,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}
if lastValidated == nil {
lastValidated, err = s.recoverStateSummary(ctx, root)
if err != nil {
return false, err
}
return false, errInvalidNilSummary
}
if ss.Slot > lastValidated.Slot {
@@ -497,18 +474,3 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
return nil, err
}
return summary, nil
}
return nil, errBlockDoesNotExist
}

View File

@@ -78,17 +78,3 @@ func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
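Both removed accessors, like ChainHeads above them, follow one pattern: take the forkchoice store's read lock, delegate to the store, and release on return. A self-contained sketch of that pattern with hypothetical types (not Prysm's actual store or service definitions):

package sketch

import "sync"

// sketchStore stands in for the forkchoice store: it embeds an RWMutex that
// callers are expected to hold around its read-only accessors.
type sketchStore struct {
	sync.RWMutex
	finalizedHash [32]byte
}

func (s *sketchStore) FinalizedPayloadBlockHash() [32]byte { return s.finalizedHash }

// sketchService mirrors the wrappers above: take the read lock, delegate, and
// release via defer, so concurrent readers never block one another.
type sketchService struct{ fcs *sketchStore }

func (s *sketchService) FinalizedBlockHash() [32]byte {
	s.fcs.RLock()
	defer s.fcs.RUnlock()
	return s.fcs.FinalizedPayloadBlockHash()
}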

View File

@@ -10,6 +10,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -71,8 +72,16 @@ func TestHeadRoot_Nil(t *testing.T) {
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -88,8 +97,16 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
@@ -102,37 +119,6 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
}
func TestFinalizedBlockHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, r))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
h := service.FinalizedBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
}
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
@@ -145,9 +131,16 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -157,8 +150,16 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_UseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
@@ -477,10 +478,15 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
@@ -542,25 +548,6 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
summ, err := beaconDB.StateSummary(ctx, br)
assert.NoError(t, err)
assert.NotNil(t, summ)
assert.Equal(t, 10, int(summ.Slot))
assert.DeepEqual(t, br[:], summ.Root)
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

View File

@@ -15,8 +15,8 @@ var (
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
@@ -25,11 +25,8 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -62,7 +61,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
@@ -70,7 +69,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
@@ -154,7 +153,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
@@ -251,23 +250,20 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, emptyAttri, 0
}
// Get previous randao.
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -305,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

View File

@@ -9,12 +9,14 @@ import (
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -30,16 +32,23 @@ import (
)
func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -86,15 +95,23 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
}
func Test_NotifyForkchoiceUpdate(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -246,8 +263,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -279,6 +296,12 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -334,8 +357,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -390,6 +413,12 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -467,9 +496,15 @@ func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -500,6 +535,8 @@ func Test_NotifyNewPayload(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
r, err := bellatrixBlk.Block().HashTreeRoot()
@@ -706,10 +743,14 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
@@ -722,6 +763,8 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -744,11 +787,19 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
@@ -757,7 +808,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -767,36 +818,26 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
hook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
}
func Test_GetPayloadAttributeV2(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
@@ -805,7 +846,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -818,7 +859,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
@@ -830,9 +871,18 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fcs)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
@@ -937,8 +987,16 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}
func TestService_removeInvalidBlockAndState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
@@ -982,10 +1040,18 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
}
func TestService_getPayloadHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err := service.getPayloadHash(ctx, []byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)
b := util.NewBeaconBlock()

View File

@@ -14,12 +14,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewProposer(slot primitives.Slot) bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
return ok || features.Get().PrepareAllPayloads
return ok
}
func (s *Service) isNewHead(r [32]byte) bool {
@@ -50,27 +49,21 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
}
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
// it returns true if the new head is updated
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return false, nil
return nil
}
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return false, nil
return nil
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return false, nil
return nil
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
@@ -79,7 +72,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
headBlock: headBlock.Block(),
})
if err != nil {
return false, errors.Wrap(err, "could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
@@ -90,7 +83,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return true, nil
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being

View File

@@ -8,6 +8,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -74,8 +76,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1))
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -83,8 +84,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1))
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
@@ -108,8 +108,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
@@ -126,8 +125,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
@@ -137,21 +135,28 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1))
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -183,17 +188,24 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))
}
func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
require.NoError(t, err)
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}

View File

@@ -54,7 +54,7 @@ type head struct {
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
// Caller of the method MUST acquire a lock on forkchoice.
// Caller of the method MUST aqcuire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -89,13 +89,13 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
// A chain re-org occurred, so we fire an event notifying the rest of the services.
r, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
@@ -403,19 +403,6 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
}
saveOrphanedAttCount.Inc()
}
for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
log.WithError(err).Error("Could not insert reorg attester slashing")
}
}
for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
log.WithError(err).Error("Could not insert reorg proposer slashing")
}
}
for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
s.cfg.ExitPool.InsertVoluntaryExit(v)
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {

View File

@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
@@ -157,11 +157,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, err
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
if err != nil {
return nil, err
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -16,7 +15,7 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Current period
@@ -39,7 +38,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -67,7 +66,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -82,7 +81,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())

View File

@@ -9,8 +9,10 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -324,88 +326,6 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
service.head = &head{state: st}
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkConfig.NumProposerSlashings = 1
blkConfig.NumAttesterSlashings = 1
blkConfig.NumVoluntaryExits = 1
blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
exits, err := service.cfg.ExitPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(exits))
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -579,9 +499,18 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)

View File

@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
consensusBlocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
case err != nil:
return err
default:
@@ -61,6 +61,13 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
log.Info("Finished applying state transition")
return nil
}
@@ -95,6 +102,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"version": version.String(block.Version()),
}).Info("Synced new block")
}
return nil
@@ -120,7 +128,7 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber(),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {

View File

@@ -5,19 +5,16 @@ import (
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
}
}
@@ -25,6 +22,5 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
return []Option{}
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -164,18 +163,3 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return nil
}
}
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
s.clockWaiter = gs
return nil
}
}
func WithSyncComplete(c chan struct{}) Option {
return func(s *Service) error {
s.syncComplete = c
return nil
}
}

View File

@@ -1,13 +1,17 @@
package blockchain
import (
"context"
"fmt"
"math/big"
"testing"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -104,8 +108,16 @@ func Test_validateMergeBlock(t *testing.T) {
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -146,8 +158,16 @@ func Test_validateMergeBlock(t *testing.T) {
}
func Test_getBlkParentHashAndTD(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -219,9 +239,14 @@ func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, ok)
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.SetSlot(1)

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -19,17 +18,7 @@ import (
)
// getAttPreState retrieves the att pre state from either the cache or the DB.
// getAttPreState retrieves the att pre state from either the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -43,36 +32,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for the early epoch calls, this should mostly have been covered already
// but is cheap
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}
// Do not process attestations for old non viable checkpoints otherwise
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}
// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
@@ -95,6 +55,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return baseState, nil
}
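One concrete number behind the epoch-to-slot conversions used in this function and in the tests that follow: slots.EpochStart resolves an epoch to its first slot, so with mainnet's 32 slots per epoch, epoch 2 starts at slot 64, matching the 2*SlotsPerEpoch assertion in TestStore_SaveCheckpointState below. A minimal sketch; the function name is invented and the comment assumes mainnet config.
func epochStartExample() (primitives.Slot, error) {
    // Epoch 2 -> slot 64 when SlotsPerEpoch is 32.
    return slots.EpochStart(2)
}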
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.

View File

@@ -6,6 +6,9 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -18,29 +21,29 @@ import (
)
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
_, err := blockTree1(t, beaconDB, []byte{'g'})
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -51,7 +54,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 32
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -66,10 +69,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
service.head = &head{
state: st,
}
tests := []struct {
name string
a *ethpb.Attestation
@@ -80,6 +79,11 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -124,9 +128,17 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -146,8 +158,15 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
}
func TestStore_SaveCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -168,9 +187,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -178,17 +194,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
@@ -207,30 +214,26 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
}
func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -240,16 +243,8 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
epoch = 2
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -328,22 +323,3 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestGetAttPreState_HeadState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
_, err = service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
}
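The recurring idiom in this test file, isolated for reference: register a block root with forkchoice (via prepareForkchoiceState and InsertNode) before asking getAttPreState about a checkpoint that points at it. Every name below appears in the hunks above; the slot and epoch values are illustrative, and the final call assumes the matching state was saved to the DB as the tests above do.
blk := util.NewBeaconBlock()
blk.Block.Slot = 32 // first slot of epoch 1
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{Epoch: 1, Root: r[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
_, err = service.getAttPreState(ctx, cp)
require.NoError(t, err)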

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -136,12 +137,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err != nil {
return errors.Wrap(err, "could not validate new payload")
}
if signed.Version() < version.Capella && isValidPayload {
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
@@ -208,24 +208,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
// verify conditions for FCU, notifies FCU, and saves the new head.
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
}
@@ -240,6 +228,18 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
},
})
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
// Save justified check point to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
@@ -283,9 +283,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
if err := s.handleEpochBoundary(ctx, postState); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
@@ -483,14 +484,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
@@ -651,81 +652,66 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
if err := s.fillMissingBlockPayloadId(ctx); err != nil {
log.WithError(err).Error("Could not fill missing payload ID")
}
case <-ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
// fillMissingBlockPayloadId is called 4 seconds into the slot and calls FCU if we are proposing next slot
// and the cache has been missed
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
if s.CurrentSlot() == s.HeadSlot() {
return
return nil
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
})
headRoot := s.headRoot()
headState := s.headState(ctx)
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
if !has || id != [8]byte{} {
return nil
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
return err
}
headState := s.headState(ctx)
headRoot := s.headRoot()
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
// waitForSync blocks until the node is synced to the head.
func (s *Service) waitForSync() error {
select {
case <-s.syncComplete:
return nil
case <-s.ctx.Done():
return errors.New("context closed, exiting goroutine")
}
return err
}
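The timing behind "called 4 seconds into the slot": the offset is SecondsPerSlot / 3, i.e. 4 seconds on mainnet's 12-second slots. A condensed sketch of the per-slot loop both variants in this hunk share, with the body reduced to a placeholder call; the method name is invented, and the alternate call in the comment reflects the other side of the diff.
func (s *Service) lateSlotLoopSketch() {
    // One third of a slot: 12s / 3 = 4s on mainnet.
    attThreshold := params.BeaconConfig().SecondsPerSlot / 3
    ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
    for {
        select {
        case <-ticker.C():
            s.lateBlockTasks(s.ctx) // or fillMissingBlockPayloadId(s.ctx) on the other side of this diff
        case <-s.ctx.Done():
            return
        }
    }
}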

View File

@@ -64,7 +64,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
parentRoot := b.ParentRoot()
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.
// a subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
@@ -230,9 +230,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// to be included(rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")

View File

@@ -12,7 +12,9 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
@@ -22,8 +24,9 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -42,9 +45,18 @@ import (
)
func TestStore_OnBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
@@ -139,8 +151,17 @@ func TestStore_OnBlock(t *testing.T) {
}
func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -163,7 +184,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
@@ -174,9 +195,17 @@ func TestStore_OnBlockBatch(t *testing.T) {
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
@@ -197,12 +226,22 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -220,8 +259,16 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -261,8 +308,16 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -304,8 +359,16 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -354,8 +417,17 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -493,8 +565,17 @@ func TestAncestorByDB_CtxErr(t *testing.T) {
}
func TestAncestor_HandleSkipSlot(t *testing.T) {
service, tr := minimalTestService(t)
beaconDB := tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -575,8 +656,17 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
}
func TestAncestor_CanUseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -636,13 +726,26 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.NoError(t, service.handleEpochBoundary(ctx, s))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -678,8 +781,21 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
}
func TestOnBlock_CanFinalize(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -713,15 +829,39 @@ func TestOnBlock_CanFinalize(t *testing.T) {
}
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
err := service.onBlock(tr.ctx, nil, [32]byte{})
err = service.onBlock(ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -744,8 +884,21 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -764,8 +917,13 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
}
func TestInsertFinalizedDeposits(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -793,8 +951,13 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -817,7 +980,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
depositCache.InsertFinalizedDeposits(ctx, 2)
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
@@ -921,8 +1084,18 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
aHash := common.BytesToHash([]byte("a"))
bHash := common.BytesToHash([]byte("b"))
@@ -1049,8 +1222,17 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
}
func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
@@ -1091,8 +1273,21 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1157,8 +1352,17 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
}
func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
wb, err := consensusblocks.NewBeaconBlock(blk)
@@ -1181,9 +1385,22 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1328,9 +1545,22 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1476,9 +1706,22 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1671,9 +1914,27 @@ func TestNoViableHead_Reboot(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
newfc := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, newfc)
newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(newStateGen),
WithForkChoiceStore(newfc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationService(attSrv),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
@@ -1822,8 +2083,18 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1875,34 +2146,30 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
logHook := logTest.NewGlobal()
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
fc := doublylinkedtree.New()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
opts := []Option{
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.fillMissingBlockPayloadId(ctx), 0)
}
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot, delay int64) {
func driftGenesisTime(s *Service, slot int64, delay int64) {
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
}
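A hedged usage fragment for the helper above: shift the tracked genesis time so the wall clock reads two seconds into slot 5. The values are arbitrary and the service variable is assumed to come from the surrounding test setup.
// Pretend the node is 2 seconds into slot 5, i.e. the next block is still "on time"
// for proposer-boost purposes.
driftGenesisTime(service, 5 /* slot */, 2 /* delay in seconds */)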

View File

@@ -7,6 +7,8 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -26,7 +28,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
}
// AttestationReceiver defines the methods the chain service uses to receive and process new attestations.
@@ -37,7 +39,7 @@ type AttestationReceiver interface {
}
// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -45,9 +47,6 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead than on gettAttPreState because that function gets called from UpdateHead that holds a write lock
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}
@@ -68,13 +67,20 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
}
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
@@ -111,9 +117,6 @@ func (s *Service) spawnProcessAttestationsRoutine() {
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// The caller of this function MUST hold a lock in forkchoice
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.UpdateHead")
defer span.End()
start := time.Now()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
@@ -130,24 +133,19 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
// Fallback to our current head root in the event of a failure.
s.headLock.RLock()
newHeadRoot = s.headRoot()
s.headLock.RUnlock()
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
if err != nil {
log.WithError(err).Error("could not update forkchoice")
}
if changed {
s.headLock.RLock()
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
s.headLock.RUnlock()
}
s.headLock.RUnlock()
if err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot); err != nil {
log.WithError(err).Error("could not update forkchoice")
}
}
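The subscribe-wait-unsubscribe idiom used by the feed-based variant of spawnProcessAttestationsRoutine, shown in isolation. stateFeed is an *event.Feed and feed.Event comes from the core/feed package, both visible in this file's imports; the function name is invented and ctx stands in for s.ctx.
func waitForGenesisEvent(ctx context.Context, stateFeed *event.Feed) {
    stateChannel := make(chan *feed.Event, 1)
    stateSub := stateFeed.Subscribe(stateChannel)
    defer stateSub.Unsubscribe()
    select {
    case <-ctx.Done():
        // Service shut down before the state was initialized.
    case <-stateChannel:
        // State initialized; the caller can start its per-slot work.
    }
}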

View File

@@ -7,7 +7,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -27,18 +31,22 @@ var (
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
service, _ := minimalTestService(t)
beaconDB := testDB.SetupDB(t)
service.genesisTime = time.Now()
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -61,8 +69,11 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
}
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -85,10 +96,13 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
}
func TestProcessAttestations_Ok(t *testing.T) {
service, tr := minimalTestService(t)
hook := logTest.NewGlobal()
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
@@ -112,9 +126,21 @@ func TestProcessAttestations_Ok(t *testing.T) {
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(newStateGen),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -163,9 +189,21 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
}
func TestService_UpdateHead_NoAtts(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithStateGen(newStateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))

View File

@@ -179,14 +179,10 @@ func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfac
return errors.Wrap(err, "could not process BLSToExecutionChanges")
}
// Mark slashings as seen so we don't include same ones in future blocks.
// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range blk.Block().Body().AttesterSlashings() {
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
}
for _, ps := range blk.Block().Body().ProposerSlashings() {
s.cfg.SlashingPool.MarkIncludedProposerSlashing(ps)
}
return nil
}

View File

@@ -7,7 +7,12 @@ import (
"time"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -120,15 +125,22 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
wg.Add(1)
t.Run(tt.name, func(t *testing.T) {
s, tr := minimalTestService(t,
WithFinalizedStateAtStartUp(genesis),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
beaconDB := tr.db
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -150,16 +162,25 @@ func TestService_ReceiveBlock(t *testing.T) {
}
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, tr := minimalTestService(t,
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -225,8 +246,17 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
fc := doublylinkedtree.New()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
@@ -246,7 +276,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasBlock(t *testing.T) {
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
t.Error("Should not have block")
@@ -266,8 +299,10 @@ func TestService_HasBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -277,9 +312,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -291,7 +326,9 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -299,8 +336,19 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
service, tr := minimalTestService(t)
pool := tr.blsPool
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}

View File
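The rewritten tests above assemble the blockchain service from an explicit []Option slice (WithDatabase, WithForkChoiceStore, WithStateGen, ...) passed to NewService(ctx, opts...). A sketch of that functional-options construction pattern, assuming toy service/config types rather than the real blockchain.Service:

package main

import (
	"context"
	"errors"
	"fmt"
)

// config mimics the dependency bag a service constructor fills in from options.
type config struct {
	dbPath   string
	notifier string
}

type service struct{ cfg *config }

// Option mutates the service during construction and may fail.
type Option func(*service) error

func WithDatabase(path string) Option {
	return func(s *service) error {
		if path == "" {
			return errors.New("empty db path")
		}
		s.cfg.dbPath = path
		return nil
	}
}

func WithNotifier(name string) Option {
	return func(s *service) error { s.cfg.notifier = name; return nil }
}

// NewService applies each option in order; later options can override earlier ones.
func NewService(_ context.Context, opts ...Option) (*service, error) {
	s := &service{cfg: &config{}}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := NewService(context.Background(),
		WithDatabase("/tmp/beacon.db"),
		WithNotifier("mock"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.cfg.dbPath, s.cfg.notifier)
}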

@@ -27,7 +27,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -58,9 +57,6 @@ type Service struct {
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
}
// config options for the service.
@@ -87,8 +83,6 @@ type config struct {
ExecutionEngineCaller execution.EngineCaller
}
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
@@ -106,9 +100,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, err
}
}
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
@@ -130,8 +121,8 @@ func (s *Service) Start() {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -245,10 +236,13 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
}
vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
return nil
}
@@ -365,10 +359,15 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
},
})
}
// initializes the state and genesis block of the beacon chain to persistent storage

View File
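One side of this diff publishes genesis through a startup.ClockSetter (SetClock(startup.NewClock(genesisTime, valRoot))); the other broadcasts a statefeed.Initialized event carrying the start time and genesis validators root over the state feed. A sketch of the feed-based variant, with stand-in Event/feed types instead of the real async/event and statefeed packages:

package main

import (
	"fmt"
	"time"
)

// InitializedData mirrors the payload sent on the state feed in the hunk above.
type InitializedData struct {
	StartTime             time.Time
	GenesisValidatorsRoot [32]byte
}

// Event is a minimal stand-in for the typed events carried by the real feed.
type Event struct {
	Type int
	Data interface{}
}

// Initialized stands in for statefeed.Initialized.
const Initialized = 1

// feed is a toy single-subscriber feed; the real async/event.Feed fans out to
// many subscribers and is safe for concurrent use.
type feed struct{ ch chan *Event }

func (f *feed) Subscribe() <-chan *Event { return f.ch }
func (f *feed) Send(e *Event)            { f.ch <- e }

func main() {
	f := &feed{ch: make(chan *Event, 1)}
	sub := f.Subscribe()

	var vr [32]byte
	f.Send(&Event{Type: Initialized, Data: &InitializedData{
		StartTime:             time.Now(),
		GenesisValidatorsRoot: vr,
	}})

	ev := <-sub
	data, ok := ev.Data.(*InitializedData)
	fmt.Println(ev.Type == Initialized, ok, !data.StartTime.IsZero()) // true true true
}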

@@ -8,9 +8,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -19,9 +23,7 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -36,8 +38,50 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
//TODO implement me
panic("implement me")
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
var web3Service *execution.Service
@@ -92,15 +136,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithDepositCache(depositCache),
WithChainStartFetcher(web3Service),
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
WithStateGen(stateGen),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
}
chainService, err := NewService(ctx, opts...)
@@ -117,14 +158,12 @@ func TestChainStartStop_Initialized(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -154,14 +193,12 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -228,14 +265,12 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -256,9 +291,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
}
func TestChainService_InitializeChainInfo(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -270,18 +310,23 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, c.StartFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -301,9 +346,14 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -315,21 +365,27 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB := tr.ctx, tr.db
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
ss := &ethpb.StateSummary{
Slot: finalizedSlot,
Root: headRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
@@ -405,21 +461,17 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
stateChannel := make(chan *feed.Event, 1)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.onExecutionChainStart(context.Background(), time.Now())
stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
}
func BenchmarkHasBlockDB(b *testing.B) {
@@ -468,10 +520,15 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
@@ -482,17 +539,21 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
@@ -502,19 +563,3 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, op)
}
// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetGenesis.
type MockClockSetter struct {
G *startup.Clock
Err error
}
var _ startup.ClockSetter = &MockClockSetter{}
// SetClock satisfies the ClockSetter interface.
// The value is written to an exported field 'G' so that it can be accessed in tests.
func (s *MockClockSetter) SetClock(g *startup.Clock) error {
s.G = g
return s.Err
}

View File

@@ -1,115 +0,0 @@
package blockchain
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
type testServiceRequirements struct {
ctx context.Context
db db.Database
fcs forkchoice.ForkChoicer
sg *stategen.State
notif statefeed.Notifier
cs *startup.ClockSynchronizer
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositcache.DepositCache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
cs := startup.NewClockSynchronizer()
attPool := attestations.NewPool()
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,
db: beaconDB,
fcs: fcs,
sg: sg,
notif: notif,
cs: cs,
attPool: attPool,
attSrv: attSrv,
blsPool: blsPool,
dc: dc,
}
defOpts := []Option{WithDatabase(req.db),
WithStateNotifier(req.notif),
WithStateGen(req.sg),
WithForkChoiceStore(req.fcs),
WithClockSynchronizer(req.cs),
WithAttestationPool(req.attPool),
WithAttestationService(req.attSrv),
WithBLSToExecPool(req.blsPool),
WithDepositCache(dc),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)
s, err := NewService(req.ctx, opts...)
require.NoError(t, err)
return s, req
}

View File
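minimalTestService above builds a default option set and appends the caller's variadic opts afterwards, so the caller's options are applied last and override the defaults. A small sketch of why that ordering matters, using toy types:

package main

import "fmt"

type svc struct{ notifier string }

type Option func(*svc)

func WithNotifier(n string) Option { return func(s *svc) { s.notifier = n } }

// build applies defaults first, then caller overrides, mirroring how
// minimalTestService appends variadic opts after defOpts above.
func build(overrides ...Option) *svc {
	defaults := []Option{WithNotifier("default-mock")}
	s := &svc{}
	for _, o := range append(defaults, overrides...) {
		o(s)
	}
	return s
}

func main() {
	fmt.Println(build().notifier)                          // default-mock
	fmt.Println(build(WithNotifier("recording")).notifier) // recording
}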

@@ -320,7 +320,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}
// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
return s.State, nil
}
@@ -587,13 +587,3 @@ func (s *ChainService) ProposerBoost() [32]byte {
}
return [32]byte{}
}
// FinalizedBlockHash mocks the same method in the chain service
func (s *ChainService) FinalizedBlockHash() [32]byte {
return [32]byte{}
}
// UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}

View File

@@ -12,13 +12,11 @@ go_library(
deps = [
"//api/client/builder:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -3,7 +3,6 @@ package builder
import (
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/urfave/cli/v2"
@@ -51,11 +50,3 @@ func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
return nil
}
}
// WithRegistrationCache uses a cache for the validator registrations instead of a persistent db.
func WithRegistrationCache() Option {
return func(s *Service) error {
s.registrationCache = cache.NewRegistrationCache()
return nil
}
}

View File

@@ -8,12 +8,10 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -27,7 +25,6 @@ type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
Configured() bool
}
@@ -40,11 +37,10 @@ type config struct {
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
type Service struct {
cfg *config
c builder.BuilderClient
ctx context.Context
cancel context.CancelFunc
registrationCache *cache.RegistrationCache
cfg *config
c builder.BuilderClient
ctx context.Context
cancel context.CancelFunc
}
// NewService instantiates a new service.
@@ -81,8 +77,7 @@ func (s *Service) Start() {
}
// Stop halts the service.
func (s *Service) Stop() error {
s.cancel()
func (*Service) Stop() error {
return nil
}
@@ -94,9 +89,6 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
defer func() {
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return nil, ErrNoBuilder
}
return s.c.SubmitBlindedBlock(ctx, b)
}
@@ -109,14 +101,8 @@ func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHas
defer func() {
getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
tracing.AnnotateError(span, ErrNoBuilder)
return nil, ErrNoBuilder
}
h, err := s.c.GetHeader(ctx, slot, parentHash, pubKey)
tracing.AnnotateError(span, err)
return h, err
return s.c.GetHeader(ctx, slot, parentHash, pubKey)
}
// Status retrieves the status of the builder relay network.
@@ -138,16 +124,9 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
defer func() {
registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return ErrNoBuilder
}
// should be removed if db is removed
idxs := make([]primitives.ValidatorIndex, 0)
msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := 0; i < len(reg); i++ {
r := reg[i]
@@ -161,33 +140,12 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
idxs = append(idxs, nx)
msgs = append(msgs, r.Message)
valid = append(valid, r)
indexToRegistration[nx] = r.Message
}
if err := s.c.RegisterValidator(ctx, valid); err != nil {
return errors.Wrap(err, "could not register validator(s)")
}
if len(indexToRegistration) != len(msgs) {
return errors.New("ids and registrations must be the same length")
}
if s.registrationCache != nil {
s.registrationCache.UpdateIndexToRegisteredMap(ctx, indexToRegistration)
return nil
} else {
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}
}
// RegistrationByValidatorID returns either the values from the cache or db.
func (s *Service) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
if s.registrationCache != nil {
return s.registrationCache.RegistrationByIndex(id)
} else {
if s.cfg == nil || s.cfg.beaconDB == nil {
return nil, errors.New("nil beacon db")
}
return s.cfg.beaconDB.RegistrationByValidatorID(ctx, id)
}
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}
// Configured returns true if the user has configured a builder client.

View File
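One side of the builder service hunk above answers RegistrationByValidatorID from an in-memory registration cache when one is configured and falls back to the beacon DB otherwise. A sketch of that cache-first, DB-fallback lookup; the store/registration types here are invented stand-ins, not the prysm DB interface:

package main

import (
	"context"
	"errors"
	"fmt"
)

type registration struct{ feeRecipient string }

// store is a hypothetical persistent backend, standing in for the beacon DB.
type store interface {
	RegistrationByValidatorID(ctx context.Context, id uint64) (*registration, error)
}

type memStore map[uint64]*registration

func (m memStore) RegistrationByValidatorID(_ context.Context, id uint64) (*registration, error) {
	r, ok := m[id]
	if !ok {
		return nil, errors.New("not found in db")
	}
	return r, nil
}

// service keeps an optional cache; when it is nil, lookups go to the DB,
// mirroring the registrationCache != nil branch in the hunk above.
type service struct {
	cache map[uint64]*registration // nil means "cache disabled"
	db    store
}

func (s *service) registrationByValidatorID(ctx context.Context, id uint64) (*registration, error) {
	if s.cache != nil {
		if r, ok := s.cache[id]; ok {
			return r, nil
		}
		return nil, errors.New("not found in cache")
	}
	if s.db == nil {
		return nil, errors.New("nil beacon db")
	}
	return s.db.RegistrationByValidatorID(ctx, id)
}

func main() {
	db := memStore{7: {feeRecipient: "0xfee"}}
	withDB := &service{db: db}
	withCache := &service{cache: map[uint64]*registration{7: {feeRecipient: "0xcache"}}}

	r1, _ := withDB.registrationByValidatorID(context.Background(), 7)
	r2, _ := withCache.registrationByValidatorID(context.Background(), 7)
	fmt.Println(r1.feeRecipient, r2.feeRecipient)
}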

@@ -3,7 +3,6 @@ package builder
import (
"context"
"testing"
"time"
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
@@ -38,33 +37,3 @@ func Test_RegisterValidator(t *testing.T) {
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], FeeRecipient: feeRecipient[:]}}}))
assert.Equal(t, true, builder.RegisteredVals[pubkey])
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
require.NoError(t, err)
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
var feeRecipient [20]byte
reg := &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
registration, err := s.registrationCache.RegistrationByIndex(0)
require.NoError(t, err)
require.DeepEqual(t, reg, registration)
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
require.NoError(t, err)
assert.Equal(t, false, s.Configured())
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
_, err = s.SubmitBlindedBlock(context.Background(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
err = s.RegisterValidator(context.Background(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
}

View File

@@ -8,8 +8,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -2,11 +2,10 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -16,11 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// Config defines a config struct for dependencies into the service.
type Config struct {
BeaconDB db.HeadAccessDatabase
}
// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
@@ -29,10 +23,8 @@ type MockBuilderService struct {
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
}
// Configured for mocking.
@@ -49,7 +41,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
return w, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
}
@@ -57,8 +49,8 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
func (s *MockBuilderService) GetHeader(ctx context.Context, slot primitives.Slot, hr [32]byte, pb [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}
w, err := builder.WrappedSignedBuilderBid(s.Bid)
@@ -68,17 +60,6 @@ func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot,
return w, s.ErrGetHeader
}
// RegistrationByValidatorID returns either the values from the cache or db.
func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
if s.RegistrationCache != nil {
return s.RegistrationCache.RegistrationByIndex(id)
}
if s.Cfg.BeaconDB != nil {
return s.Cfg.BeaconDB.RegistrationByValidatorID(ctx, id)
}
return nil, cache.ErrNotFoundRegistration
}
// RegisterValidator for mocking.
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator

View File

@@ -17,7 +17,6 @@ go_library(
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
"registration.go",
"skip_slot_cache.go",
"subnet_ids.go",
"sync_committee.go",
@@ -34,7 +33,6 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
@@ -49,7 +47,6 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -67,7 +64,6 @@ go_test(
"committee_test.go",
"payload_id_test.go",
"proposer_indices_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"sync_committee_head_state_test.go",
@@ -86,7 +82,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -42,16 +42,9 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
c := &BalanceCache{}
c.Clear()
return c
}
// Clear resets the SyncCommitteeCache to its initial state
func (c *BalanceCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.cache = lruwrpr.New(maxBalanceCacheSize)
return &BalanceCache{
cache: lruwrpr.New(maxBalanceCacheSize),
}
}
// AddTotalEffectiveBalance adds a new total effective balance entry for current balance for state `st` into the cache.

View File
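One side of this hunk gives BalanceCache a Clear method that re-creates the underlying LRU under the write lock and has NewEffectiveBalanceCache delegate to it, so construction and reset share one code path. A sketch of that clear-by-reinitialize pattern with a plain map standing in for the lruwrpr wrapper:

package main

import (
	"fmt"
	"sync"
)

// balanceCache caches a total effective balance per epoch key.
type balanceCache struct {
	lock  sync.RWMutex
	cache map[string]uint64
}

// newBalanceCache builds the cache by delegating to Clear, so construction and
// reset share one code path (the shape used in the hunk above).
func newBalanceCache() *balanceCache {
	c := &balanceCache{}
	c.Clear()
	return c
}

// Clear swaps in a fresh map under the write lock, dropping every entry.
func (c *balanceCache) Clear() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache = make(map[string]uint64)
}

func (c *balanceCache) add(key string, bal uint64) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache[key] = bal
}

func (c *balanceCache) get(key string) (uint64, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	b, ok := c.cache[key]
	return b, ok
}

func main() {
	c := newBalanceCache()
	c.add("epoch-1", 32_000_000_000)
	c.Clear()
	_, ok := c.get("epoch-1")
	fmt.Println(ok) // false: entries dropped by Clear
}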

@@ -3,11 +3,16 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
)
// FakeBalanceCache is a fake struct with 1 LRU cache for looking up balance by epoch.
type FakeBalanceCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
@@ -24,8 +29,3 @@ func (c *FakeBalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState
func (c *FakeBalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
return 0, nil
}
// Clear is a stub.
func (c *FakeBalanceCache) Clear() {
return
}

View File

@@ -56,17 +56,10 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
cc := &CommitteeCache{}
cc.Clear()
return cc
}
// Clear resets the CommitteeCache to its initial state
func (c *CommitteeCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
c.inProgress = make(map[string]bool)
return &CommitteeCache{
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
inProgress: make(map[string]bool),
}
}
// Committee fetches the shuffled indices by slot and committee index. Every list of indices

View File

@@ -69,8 +69,3 @@ func (c *FakeCommitteeCache) MarkInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
return nil
}
// Clear is a stub.
func (c *FakeCommitteeCache) Clear() {
return
}

View File

@@ -129,7 +129,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
}
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) error {
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
@@ -141,7 +141,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return nil
return
}
// In the event we have less deposits than we need to
// finalize we finalize till the index on which we do have it.
@@ -151,7 +151,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return nil
return
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
@@ -162,10 +162,12 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
}
depHash, err := d.Deposit.Data.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash deposit data")
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
return
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
return errors.Wrap(err, "could not insert deposit hash")
log.WithError(err).Error("Could not insert deposit hash")
return
}
insertIndex++
}
@@ -174,7 +176,6 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
Deposits: depositTrie,
MerkleTrieIndex: eth1DepositIndex,
}
return nil
}
// AllDepositContainers returns all historical deposit containers.

View File
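The hunk above switches InsertFinalizedDeposits between returning wrapped errors and being a void function that only logs failures; the tests that follow switch between require.NoError(t, dc.InsertFinalizedDeposits(...)) and a bare call accordingly. A sketch contrasting the two shapes around a hypothetical hashing step (hashDeposit is invented; the real code hashes deposit data with HashTreeRoot and wraps with pkg/errors):

package main

import (
	"errors"
	"fmt"
)

// hashDeposit is a hypothetical stand-in for HashTreeRoot on deposit data; it
// fails for empty input so both paths below are exercised.
func hashDeposit(data []byte) ([32]byte, error) {
	if len(data) == 0 {
		return [32]byte{}, errors.New("empty deposit data")
	}
	return [32]byte{data[0]}, nil
}

// insertFinalized surfaces a wrapped error to the caller, the shape used on
// one side of the diff (the tests then assert with require.NoError).
func insertFinalized(data []byte) error {
	if _, err := hashDeposit(data); err != nil {
		return fmt.Errorf("could not hash deposit data: %w", err)
	}
	// ... insert into the finalized trie here ...
	return nil
}

// insertFinalizedLogging is the void shape from the other side: it logs and
// returns, so callers cannot tell the finalized trie was left unchanged.
func insertFinalizedLogging(data []byte) {
	if _, err := hashDeposit(data); err != nil {
		fmt.Println("Could not hash deposit data. Finalized deposit cache not updated:", err)
		return
	}
	// ... insert into the finalized trie here ...
}

func main() {
	fmt.Println(insertFinalized(nil))       // error surfaces to the caller
	insertFinalizedLogging(nil)             // error only visible in the log line
	fmt.Println(insertFinalized([]byte{1})) // <nil>
}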

@@ -416,7 +416,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
Index: 3,
})
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -474,9 +474,9 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 2,
}
dc.deposits = oldFinalizedDeposits
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
dc.InsertFinalizedDeposits(context.Background(), 1)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
@@ -503,7 +503,7 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -548,7 +548,7 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
}
dc.deposits = finalizedDeposits
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
dc.InsertFinalizedDeposits(context.Background(), 5)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -623,10 +623,10 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
}
dc.deposits = finalizedDeposits
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
dc.InsertFinalizedDeposits(context.Background(), 5)
// Reinsert finalized deposits with a lower index.
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -694,7 +694,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
},
Index: 3,
})
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
@@ -751,7 +751,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
},
Index: 3,
})
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
@@ -799,12 +799,12 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
assert.NoError(t, err)
// Perform this in a non-sensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
dc.InsertFinalizedDeposits(context.Background(), 10)
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.InsertFinalizedDeposits(context.Background(), 3)
dc.InsertFinalizedDeposits(context.Background(), 4)
// Mimic finalized deposit trie fetch.
// Mimick finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1
@@ -817,9 +817,9 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}
insertIndex++
}
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 14))
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 14)
fd = dc.FinalizedDeposits(context.Background())
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))

View File

@@ -1,6 +1,6 @@
package cache
import "github.com/pkg/errors"
import "errors"
var (
// ErrNilValueProvided for when we try to put a nil value in a cache.
@@ -12,6 +12,4 @@ var (
// ErrNonExistingSyncCommitteeKey when sync committee key (root) does not exist in cache.
ErrNonExistingSyncCommitteeKey = errors.New("does not exist sync committee key")
errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
// ErrNotFoundRegistration when validator registration does not exist in cache.
ErrNotFoundRegistration = errors.Wrap(ErrNotFound, "no validator registered")
)

View File
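One side of this hunk keeps github.com/pkg/errors so ErrNotFoundRegistration can be declared as a wrap of ErrNotFound, which lets callers match either sentinel with errors.Is. A sketch showing that the wrap chain preserves errors.Is matching; the lookup helper is invented for illustration:

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var (
	// ErrNotFound is the generic sentinel.
	ErrNotFound = stderrors.New("not found")
	// ErrNotFoundRegistration wraps the generic sentinel with more context,
	// mirroring the declaration in the hunk above.
	ErrNotFoundRegistration = errors.Wrap(ErrNotFound, "no validator registered")
)

// lookup is a hypothetical accessor that annotates the sentinel further.
func lookup(id uint64) error {
	return errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}

func main() {
	err := lookup(42)
	// Both sentinels match because pkg/errors wraps implement Unwrap.
	fmt.Println(stderrors.Is(err, ErrNotFoundRegistration)) // true
	fmt.Println(stderrors.Is(err, ErrNotFound))             // true
}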

@@ -4,41 +4,35 @@ import (
"bytes"
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
const keyLength = 40
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
slot primitives.Slot,
r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
if !ok {
return 0, [pIdLength]byte{}, false
return 0, [8]byte{}, false
}
vId := ids[:vIdLength]
@@ -49,13 +43,8 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
slot primitives.Slot,
vId primitives.ValidatorIndex,
pId [pIdLength]byte,
r [fieldparams.RootLength]byte,
) {
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
@@ -74,7 +63,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
}
}
// PrunePayloadIDs removes the payload ID entries older than input slot.
// PrunePayloadIDs removes the payload id entries that's current than input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
f.Lock()
defer f.Unlock()
@@ -87,8 +76,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
}
}
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
var k [keyLength]byte
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
var k [40]byte
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
return k
}

View File
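Both sides of the payload-ID cache hunk key entries by an 8-byte big-endian slot concatenated with the 32-byte head root (40 bytes total) and pack the 8-byte proposer index next to the 8-byte payload ID in the value; they differ only in whether those sizes are named constants. A sketch of building and splitting such fixed-size keys and values with encoding/binary in place of prysm's bytesutil helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	rootLen  = 32
	keyLen   = 8 + rootLen // slot || head root
	vIDLen   = 8
	pIDLen   = 8
	vpIDsLen = vIDLen + pIDLen
)

// idKey concatenates the big-endian slot with the head root, as in the hunk above.
func idKey(slot uint64, root [rootLen]byte) [keyLen]byte {
	var k [keyLen]byte
	binary.BigEndian.PutUint64(k[:8], slot)
	copy(k[8:], root[:])
	return k
}

// packIDs stores the proposer index and payload ID in one 16-byte value.
func packIDs(proposer uint64, payloadID [pIDLen]byte) [vpIDsLen]byte {
	var v [vpIDsLen]byte
	binary.BigEndian.PutUint64(v[:vIDLen], proposer)
	copy(v[vIDLen:], payloadID[:])
	return v
}

func unpackIDs(v [vpIDsLen]byte) (uint64, [pIDLen]byte) {
	var pid [pIDLen]byte
	copy(pid[:], v[vIDLen:])
	return binary.BigEndian.Uint64(v[:vIDLen]), pid
}

func main() {
	cache := map[[keyLen]byte][vpIDsLen]byte{}
	var root [rootLen]byte
	root[0] = 0xaa

	cache[idKey(12345, root)] = packIDs(7, [pIDLen]byte{1, 2, 3, 4, 5, 6, 7, 8})

	proposer, pid := unpackIDs(cache[idKey(12345, root)])
	fmt.Println(proposer, pid) // 7 [1 2 3 4 5 6 7 8]
}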

@@ -46,16 +46,9 @@ func proposerIndicesKeyFn(obj interface{}) (string, error) {
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *ProposerIndicesCache {
c := &ProposerIndicesCache{}
c.Clear()
return c
}
// Clear resets the ProposerIndicesCache to its initial state
func (c *ProposerIndicesCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.proposerIndicesCache = cache.NewFIFO(proposerIndicesKeyFn)
return &ProposerIndicesCache{
proposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
}
}
// AddProposerIndices adds ProposerIndices object to the cache.

View File

@@ -33,7 +33,3 @@ func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error)
func (c *FakeProposerIndicesCache) Len() int {
return 0
}
// Clear is a stub.
func (c *FakeProposerIndicesCache) Clear() {
}

View File

@@ -1,55 +0,0 @@
package cache
import (
"context"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
// RegistrationCache is used to store the cached results of an Validator Registration request.
// beacon api /eth/v1/validator/register_validator
type RegistrationCache struct {
indexToRegistration map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1
lock sync.RWMutex
}
// NewRegistrationCache initializes the map and underlying cache.
func NewRegistrationCache() *RegistrationCache {
return &RegistrationCache{
indexToRegistration: make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1),
lock: sync.RWMutex{},
}
}
// RegistrationByIndex returns the registration by index in the cache and also removes items in the cache if expired.
func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
regCache.lock.RLock()
v, ok := regCache.indexToRegistration[id]
if !ok {
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")
defer span.End()
regCache.lock.Lock()
defer regCache.lock.Unlock()
for key, value := range m {
regCache.indexToRegistration[key] = &ethpb.ValidatorRegistrationV1{
Pubkey: bytesutil.SafeCopyBytes(value.Pubkey),
FeeRecipient: bytesutil.SafeCopyBytes(value.FeeRecipient),
GasLimit: value.GasLimit,
Timestamp: value.Timestamp,
}
}
}

View File

@@ -1,46 +0,0 @@
package cache
import (
"context"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestRegistrationCache(t *testing.T) {
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
cache := NewRegistrationCache()
m := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
m[validatorIndex] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("successfully updates", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
reg, err := cache.RegistrationByIndex(validatorIndex2)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}

View File

@@ -4,14 +4,12 @@ package cache
import (
"sync"
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/cache"
)
@@ -33,9 +31,8 @@ var (
// SyncCommitteeCache utilizes a FIFO cache to sufficiently cache validator position within sync committee.
// It is thread safe with concurrent read write.
type SyncCommitteeCache struct {
cache *cache.FIFO
lock sync.RWMutex
cleared *atomic.Uint64
cache *cache.FIFO
lock sync.RWMutex
}
// Index position of all validators in sync committee where `currentSyncCommitteeRoot` is the
@@ -54,17 +51,9 @@ type positionInCommittee struct {
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *SyncCommitteeCache {
c := &SyncCommitteeCache{cleared: &atomic.Uint64{}}
c.Clear()
return c
}
// Clear resets the SyncCommitteeCache to its initial state
func (s *SyncCommitteeCache) Clear() {
s.lock.Lock()
defer s.lock.Unlock()
s.cleared.Add(1)
s.cache = cache.NewFIFO(keyFn)
return &SyncCommitteeCache{
cache: cache.NewFIFO(keyFn),
}
}
// CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
@@ -134,10 +123,6 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
// cycled (new address), don't update it.
clearCount := s.cleared.Load()
csc, err := st.CurrentSyncCommittee()
if err != nil {
return err
@@ -177,10 +162,6 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("cache rotated during async committee update operation - abandoning cache update")
return nil
}
if err := s.cache.Add(&syncCommitteeIndexPosition{
currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,

View File
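One side of this diff adds an atomic cleared counter to SyncCommitteeCache: UpdatePositionsInCommittee snapshots it before its slow work and abandons the write if Clear bumped it in the meantime, so an async update cannot repopulate a cache that was rotated underneath it. A sketch of that generation-check pattern with a map in place of the FIFO cache:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type genCache struct {
	lock    sync.RWMutex
	cleared atomic.Uint64
	data    map[string]int
}

func newGenCache() *genCache {
	c := &genCache{}
	c.Clear()
	return c
}

// Clear rotates the underlying map and bumps the generation counter.
func (c *genCache) Clear() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cleared.Add(1)
	c.data = make(map[string]int)
}

// update snapshots the generation, does slow work outside the lock, then
// refuses to write if the cache was cleared underneath it.
func (c *genCache) update(key string, slowWork func() int) {
	gen := c.cleared.Load()
	v := slowWork() // runs without holding the lock

	c.lock.Lock()
	defer c.lock.Unlock()
	if gen != c.cleared.Load() {
		fmt.Println("cache rotated during async update - abandoning write")
		return
	}
	c.data[key] = v
}

func main() {
	c := newGenCache()
	c.update("stale", func() int {
		c.Clear() // simulate a concurrent rotation mid-update
		return 1
	})
	c.lock.RLock()
	_, ok := c.data["stale"]
	c.lock.RUnlock()
	fmt.Println(ok) // false: the stale write was dropped
}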

@@ -30,8 +30,3 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
return nil
}
// Clear -- fake.
func (s *FakeSyncCommitteeCache) Clear() {
return
}

View File

@@ -208,7 +208,7 @@ func ProcessEpochParticipation(
}
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
beaconState state.BeaconState,
bal *precompute.Balance,
@@ -265,7 +265,7 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
increment := cfg.EffectiveBalanceIncrement
factor := cfg.BaseRewardFactor
baseRewardMultiplier := increment * factor / math.CachedSquareRoot(bal.ActiveCurrentEpoch)
baseRewardMultiplier := increment * factor / math.IntegerSquareRoot(bal.ActiveCurrentEpoch)
leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)
// Modified in Altair and Bellatrix.

View File

@@ -12,7 +12,6 @@ import (
// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.
//
// nolint:dupword
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
//
@@ -46,7 +45,6 @@ func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconSt
// ProcessParticipationFlagUpdates processes participation flag updates by rotating current to previous.
//
// nolint:dupword
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
//

View File

@@ -58,5 +58,5 @@ func BaseRewardPerIncrement(activeBalance uint64) (uint64, error) {
return 0, errors.New("active balance can't be 0")
}
cfg := params.BeaconConfig()
return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.CachedSquareRoot(activeBalance), nil
return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.IntegerSquareRoot(activeBalance), nil
}
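For orientation, the base reward per increment above is the effective-balance increment times the base reward factor, divided by the integer square root of the total active balance. A rough worked example with assumed mainnet-style numbers (a 1 ETH increment in Gwei, base reward factor 64, roughly 10M ETH active); math.Sqrt stands in here for the repo's integer square root helper:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Assumed illustrative constants; consult the live config for real values.
    const effectiveBalanceIncrement = uint64(1_000_000_000) // 1 ETH in Gwei
    const baseRewardFactor = uint64(64)
    activeBalance := uint64(10_000_000) * effectiveBalanceIncrement // ~10M ETH staked, in Gwei

    sqrt := uint64(math.Sqrt(float64(activeBalance))) // stand-in for an integer square root
    baseRewardPerIncrement := effectiveBalanceIncrement * baseRewardFactor / sqrt
    fmt.Println(baseRewardPerIncrement) // prints: 640 (Gwei) with these assumed numbers
}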

View File

@@ -2,7 +2,6 @@ package altair
import (
"context"
goErrors "errors"
"fmt"
"time"
@@ -23,10 +22,6 @@ import (
const maxRandomByte = uint64(1<<8 - 1)
var (
ErrTooLate = errors.New("sync message is too late")
)
// ValidateNilSyncContribution validates the following fields are not nil:
// -the contribution and proof itself
// -the message within contribution and proof
@@ -222,7 +217,7 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
upperBound := time.Now().Add(clockDisparity)
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
return fmt.Errorf(
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
messageTime,
slot,
@@ -231,11 +226,6 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
upperBound,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {
syncErr = goErrors.Join(ErrTooLate, syncErr)
}
return syncErr
}
return nil
}
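One side of this hunk tags the out-of-range error with a sentinel via errors.Join, so callers can branch on the too-late case while still seeing the detailed message. A minimal sketch of that pattern (Go 1.20+), with hypothetical names:

package main

import (
    "errors"
    "fmt"
    "time"
)

// ErrTooLate is a sentinel that callers can test for with errors.Is.
var ErrTooLate = errors.New("message is too late")

// validateTime returns a detailed error, additionally tagged with ErrTooLate
// when the message time falls before the lower bound (i.e. it refers to a slot
// that is already too old).
func validateTime(msg, lower, upper time.Time) error {
    if msg.Before(lower) || msg.After(upper) {
        err := fmt.Errorf("message time %v not within [%v, %v]", msg, lower, upper)
        if msg.Before(lower) {
            err = errors.Join(ErrTooLate, err) // attach the sentinel, keep the detail
        }
        return err
    }
    return nil
}

func main() {
    now := time.Now()
    err := validateTime(now.Add(-time.Hour), now.Add(-time.Minute), now.Add(time.Minute))
    fmt.Println(errors.Is(err, ErrTooLate)) // prints: true
}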

View File

@@ -28,7 +28,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -61,7 +60,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case errors.Is(err, blocks.ErrUnsupportedGetter):
return false, nil
case err != nil:
return false, err
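The switch above treats a field-not-supported-for-this-version error as an ordinary pre-execution answer (false, nil) and only propagates other errors. A generic sketch of that shape with a hypothetical sentinel and toy block type:

package main

import (
    "errors"
    "fmt"
)

// errUnsupportedField is a hypothetical sentinel returned by version-gated getters.
var errUnsupportedField = errors.New("field is not supported for this version")

type block struct{ version int }

// execution returns payload data only for post-merge block versions.
func (b block) execution() (string, error) {
    if b.version < 3 { // pretend versions 0-2 predate execution payloads
        return "", errUnsupportedField
    }
    return "payload", nil
}

// isExecutionBlock distinguishes "this version simply has no payload" from real failures.
func isExecutionBlock(b block) (bool, error) {
    payload, err := b.execution()
    switch {
    case errors.Is(err, errUnsupportedField):
        return false, nil // old fork: not an error, just not an execution block
    case err != nil:
        return false, err // genuine failure
    }
    return payload != "", nil
}

func main() {
    ok, err := isExecutionBlock(block{version: 1})
    fmt.Println(ok, err) // prints: false <nil>
}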

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
}, 0)
}, big.NewInt(0))
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -643,7 +644,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -1061,7 +1062,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {

View File

@@ -9,6 +9,110 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// UpgradeToDeneb updates a generic state to return the version Deneb state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: make([]byte, 32),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
}
return state_native.InitializeFromProtoUnsafeDeneb(s)
}
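A fork-upgrade helper like the one above is normally applied once, at the epoch boundary where the chain crosses the fork epoch. A toy sketch of that wiring; toyState, upgradeToDeneb, and denebForkEpoch are stand-ins, not Prysm's actual API:

package main

import "fmt"

// toyState stands in for a beacon state; only the fields needed by the sketch
// are modelled. The real upgrade copies every field forward and rebuilds the
// execution payload header for the new fork.
type toyState struct {
    slot    uint64
    version string
}

const (
    slotsPerEpoch  = 32
    denebForkEpoch = 100 // assumed illustrative fork epoch
)

func currentEpoch(s toyState) uint64 { return s.slot / slotsPerEpoch }

// upgradeToDeneb mimics the shape of a fork upgrade: same data, new version tag.
func upgradeToDeneb(s toyState) (toyState, error) {
    s.version = "deneb"
    return s, nil
}

// processEpochBoundary applies the upgrade only when the fork epoch is reached.
func processEpochBoundary(s toyState) (toyState, error) {
    if currentEpoch(s) != denebForkEpoch {
        return s, nil
    }
    return upgradeToDeneb(s)
}

func main() {
    st := toyState{slot: denebForkEpoch * slotsPerEpoch, version: "capella"}
    st, _ = processEpochBoundary(st)
    fmt.Println(st.version) // prints: deneb
}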
// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

View File

@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}
// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
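Per the comment above, from Capella onward the historical accumulator appends a summary of the block-roots and state-roots vectors instead of a single historical batch root. A hedged sketch of that once-per-period append, with illustrative type and helper names (the period check follows the consensus spec's process_historical_summaries_update):

package main

import "fmt"

// historicalSummary pairs the roots of the block-roots and state-roots vectors,
// mirroring the Capella HistoricalSummary container.
type historicalSummary struct {
    blockSummaryRoot [32]byte
    stateSummaryRoot [32]byte
}

const (
    slotsPerEpoch          = 32
    slotsPerHistoricalRoot = 8192
)

// hashTreeRoot is a placeholder for SSZ hashing of a roots vector.
func hashTreeRoot(roots [][32]byte) [32]byte { return roots[0] }

// maybeAppendSummary sketches the epoch-boundary check: once every
// SLOTS_PER_HISTORICAL_ROOT worth of slots, append a summary of the accumulated
// block and state roots (post-Capella) instead of one historical batch root.
func maybeAppendSummary(nextEpoch uint64, blockRoots, stateRoots [][32]byte, summaries []historicalSummary) []historicalSummary {
    if nextEpoch%(slotsPerHistoricalRoot/slotsPerEpoch) != 0 {
        return summaries
    }
    return append(summaries, historicalSummary{
        blockSummaryRoot: hashTreeRoot(blockRoots),
        stateSummaryRoot: hashTreeRoot(stateRoots),
    })
}

func main() {
    roots := make([][32]byte, 1)
    fmt.Println(len(maybeAppendSummary(256, roots, roots, nil))) // prints: 1, since 256 % (8192/32) == 0
}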
@@ -393,7 +393,6 @@ func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, er
// ProcessParticipationRecordUpdates rotates current/previous epoch attestations during epoch processing.
//
// nolint:dupword
// Spec pseudocode definition:
//
// def process_participation_record_updates(state: BeaconState) -> None:

View File

@@ -14,7 +14,7 @@ type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator)
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, error)
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validators.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
state state.BeaconState,
pBal *Balance,
@@ -72,7 +72,7 @@ func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Val
prevEpoch := time.PrevEpoch(state)
finalizedEpoch := state.FinalizedCheckpointEpoch()
sqrtActiveCurrentEpoch := math.CachedSquareRoot(pBal.ActiveCurrentEpoch)
sqrtActiveCurrentEpoch := math.IntegerSquareRoot(pBal.ActiveCurrentEpoch)
for i, v := range vp {
rewards[i], penalties[i] = attestationDelta(pBal, sqrtActiveCurrentEpoch, v, prevEpoch, finalizedEpoch)
}
@@ -104,6 +104,7 @@ func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator
} else {
rewardNumerator := br * (pBal.PrevEpochAttested / effectiveBalanceIncrement)
r += rewardNumerator / currentEpochBalance
}
} else {
p += br
@@ -160,7 +161,7 @@ func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Valida
rewards := make([]uint64, numofVals)
totalBalance := pBal.ActiveCurrentEpoch
balanceSqrt := math.CachedSquareRoot(totalBalance)
balanceSqrt := math.IntegerSquareRoot(totalBalance)
// Balance square root cannot be 0, this prevents division by 0.
if balanceSqrt == 0 {
balanceSqrt = 1

View File

@@ -15,11 +15,12 @@ const (
BlockProcessed = iota + 1
// ChainStarted is sent when enough validators are active to start proposing blocks.
ChainStarted
// deprecated: Initialized is sent when the internal beacon node's state is ready to be accessed.
_
// deprecated: Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
_
// Reorg is an event sent when the new head is not a descendant of the previous head.
// Initialized is sent when the internal beacon node's state is ready to be accessed.
Initialized
// Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
Synced
// Reorg is an event sent when the new head state's slot after a block
// transition is lower than its previous head state slot value.
Reorg
// FinalizedCheckpoint event.
FinalizedCheckpoint
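One side of this hunk keeps underscore placeholders for retired event types so that later constants retain their numeric values. A small sketch of why that matters for an iota enum whose values cross process boundaries; the comments show the resulting numbers:

package main

import "fmt"

// Event types are published as integers, so removing a deprecated member from
// the middle of an iota block would silently renumber everything after it.
// Reserving the slot with _ keeps downstream consumers' values stable.
const (
    BlockProcessed = iota + 1 // 1
    ChainStarted              // 2
    _                         // 3: deprecated Initialized, slot reserved
    _                         // 4: deprecated Synced, slot reserved
    Reorg                     // 5: unchanged even though two members were retired
    FinalizedCheckpoint       // 6
)

func main() {
    fmt.Println(Reorg, FinalizedCheckpoint) // prints: 5 6
}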

View File

@@ -38,7 +38,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -49,7 +48,6 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"main_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
@@ -58,7 +56,6 @@ go_test(
"weak_subjectivity_test.go",
],
embed = [":go_default_library"],
race = "on",
shard_count = 2,
deps = [
"//beacon-chain/cache:go_default_library",
@@ -69,6 +66,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,16 +8,13 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
var (
ErrTooLate = errors.New("attestation is too late")
)
// ValidateNilAttestation checks if any composite field of input attestation is nil.
// Access to these nil fields will result in run time panic,
// it is recommended to run these checks as first line of defense.
@@ -69,6 +66,25 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
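The modulo check above follows the spec's aggregator selection: hash the validator's slot signature and test the low eight bytes against max(1, committee_size / TARGET_AGGREGATORS_PER_COMMITTEE). A self-contained sketch assuming a target of 16 aggregators per committee; sha256 here stands in for the repo's hashing helper:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

const targetAggregatorsPerCommittee = 16 // assumed spec constant

// isAggregator reports whether the validator is selected to aggregate for the
// slot: the low 8 bytes of the hashed slot signature must be divisible by the
// modulo derived from the committee size, so on average about 16 aggregators
// are chosen per committee.
func isAggregator(committeeCount uint64, slotSig []byte) bool {
    modulo := committeeCount / targetAggregatorsPerCommittee
    if modulo == 0 {
        modulo = 1 // small committees: everyone qualifies
    }
    b := sha256.Sum256(slotSig)
    return binary.LittleEndian.Uint64(b[:8])%modulo == 0
}

func main() {
    fmt.Println(isAggregator(128, []byte("example slot signature")))
}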
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation *ethpb.Attestation) bool {
@@ -167,11 +183,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooLateCount.Inc()
return errors.Join(ErrTooLate, attError)
attReceivedTooEarlyCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooEarlyCount.Inc()
attReceivedTooLateCount.Inc()
return attError
}
return nil
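The two counters above separate the failure modes: an attestation older than the lower bound arrived too late to be useful, while one newer than the upper bound references a slot that has not finished and so arrived too early. A minimal sketch of the window check with labels following that reading; the bounds and counter names are illustrative:

package main

import (
    "fmt"
    "time"
)

var tooLateCount, tooEarlyCount int // stand-ins for Prometheus counters

// validateAttTime accepts message times inside [lower, upper] and bumps the
// matching counter otherwise: before the window means the data is already
// stale (too late), after the window means it refers to a slot still in the
// future (too early).
func validateAttTime(att, lower, upper time.Time) error {
    if att.Before(lower) {
        tooLateCount++
        return fmt.Errorf("attestation time %v is before %v", att, lower)
    }
    if att.After(upper) {
        tooEarlyCount++
        return fmt.Errorf("attestation time %v is after %v", att, upper)
    }
    return nil
}

func main() {
    now := time.Now()
    _ = validateAttTime(now.Add(-time.Hour), now.Add(-time.Minute), now.Add(time.Minute))
    fmt.Println(tooLateCount, tooEarlyCount) // prints: 1 0
}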

View File

@@ -10,6 +10,8 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -43,6 +45,44 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
}
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
require.NoError(t, err)
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
})
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
require.NoError(t, err)
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature not supposed to verify")
})
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)

View File

@@ -382,10 +382,10 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache.Clear()
proposerIndicesCache.Clear()
syncCommitteeCache.Clear()
balanceCache.Clear()
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
syncCommitteeCache = cache.NewSyncCommittee()
balanceCache = cache.NewEffectiveBalanceCache()
}
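The two sides of this hunk reset the helper caches differently: one clears the existing objects in place, the other swaps in freshly constructed caches. A toy sketch of the difference: re-pointing a package-level variable leaves earlier holders of the old pointer writing into an object nobody reads, while clearing in place is visible to every holder:

package main

import "fmt"

type cacheT struct{ data map[string]int }

func newCache() *cacheT { return &cacheT{data: map[string]int{}} }

func (c *cacheT) clear() { c.data = map[string]int{} }

var shared = newCache()

func main() {
    old := shared // some goroutine captured the pointer earlier
    old.data["k"] = 1

    // Option A: re-point the package variable. old still works, but its writes
    // are now invisible to anyone reading through shared.
    shared = newCache()
    fmt.Println(len(shared.data), len(old.data)) // prints: 0 1

    // Option B: clear in place. Every holder of the pointer sees the reset.
    shared = old
    shared.clear()
    fmt.Println(len(old.data)) // prints: 0
}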
// computeCommittee returns the requested shuffled committee out of the total committees using

View File

@@ -91,7 +91,6 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
defer ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
@@ -102,8 +101,6 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
var activationEpoch primitives.Epoch
@@ -121,6 +118,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
ClearCache()
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
@@ -190,7 +188,6 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
@@ -258,8 +255,6 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
defer ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -274,6 +269,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
ClearCache()
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
@@ -380,7 +376,6 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
@@ -395,7 +390,6 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)

View File

@@ -1,13 +0,0 @@
package helpers
import (
"os"
"testing"
)
// run ClearCache before each test to prevent cross-test side effects
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}

View File

@@ -75,8 +75,6 @@ func TestTotalActiveBalance(t *testing.T) {
}
func TestTotalActiveBal_ReturnMin(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
}{
@@ -98,8 +96,6 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
func TestTotalActiveBalance_WithCache(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
wantCount int

View File

@@ -25,7 +25,9 @@ var (
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
func IsCurrentPeriodSyncCommittee(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
@@ -34,7 +36,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, err
return false, nil
}
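Per the doc comment above, membership lookup is two-step: consult the sync committee cache and, on a miss, fall back to the committee object read from the state. A compilable sketch of that shape with toy types in place of the real cache API:

package main

import (
    "errors"
    "fmt"
)

var errMissingKey = errors.New("key not in sync committee cache")

type committeeCache struct{ members map[string][]int }

// positions returns cached subcommittee positions or a miss error.
func (c *committeeCache) positions(pubkey string) ([]int, error) {
    p, ok := c.members[pubkey]
    if !ok {
        return nil, errMissingKey
    }
    return p, nil
}

// inCommittee tries the cache first and falls back to a linear scan of the
// committee's pubkeys when the cache has not been warmed for this period.
func inCommittee(c *committeeCache, committee []string, pubkey string) bool {
    if _, err := c.positions(pubkey); err == nil {
        return true
    } else if !errors.Is(err, errMissingKey) {
        return false // unexpected error: treat as not found in this sketch
    }
    for _, k := range committee {
        if k == pubkey {
            return true
        }
    }
    return false
}

func main() {
    c := &committeeCache{members: map[string][]int{}}
    fmt.Println(inCommittee(c, []string{"a", "b"}, "b")) // prints: true, via the fallback scan
}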
committee, err := st.CurrentSyncCommittee()
if err != nil {
@@ -71,7 +73,7 @@ func IsNextPeriodSyncCommittee(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, err
return false, nil
}
committee, err := st.NextSyncCommittee()
if err != nil {
@@ -98,7 +100,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, err
return nil, nil
}
committee, err := st.CurrentSyncCommittee()
if err != nil {
@@ -132,7 +134,7 @@ func NextPeriodSyncSubcommitteeIndices(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, err
return nil, nil
}
committee, err := st.NextSyncCommittee()
if err != nil {

View File

@@ -17,8 +17,6 @@ import (
)
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -39,6 +37,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -48,8 +47,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -76,8 +73,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -99,13 +94,11 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
require.ErrorContains(t, "index 12390192 out of range", err)
require.NoError(t, err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -126,6 +119,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -182,13 +176,11 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
require.ErrorContains(t, "index 120391029 out of range", err)
require.NoError(t, err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -209,6 +201,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -218,8 +211,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -239,6 +230,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := syncPeriodBoundaryRoot(state)
require.NoError(t, err)
@@ -260,7 +252,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -282,13 +273,11 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.ErrorContains(t, "index 129301923 out of range", err)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -309,6 +298,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -345,7 +335,6 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -367,7 +356,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.ErrorContains(t, "index 21093019 out of range", err)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
@@ -398,8 +387,6 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -425,6 +412,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)

Some files were not shown because too many files have changed in this diff.