Compare commits


256 Commits

Author SHA1 Message Date
Zahoor Mohamed
869d4443d3 process the optimistic blocks whose parent are optimistic too 2022-03-11 14:59:10 +05:30
terence tsao
f6eb6cd6bf Fix run time 2022-03-10 14:01:18 -08:00
terence tsao
a08c809073 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-10 13:53:01 -08:00
terence tsao
ef0435493d Revert "Update receive_block.go"
This reverts commit 5b4a87c512.
2022-03-10 11:09:17 -08:00
terence tsao
6f15d2b0b2 Update bzl 2022-03-10 11:02:07 -08:00
terence tsao
5b4a87c512 Update receive_block.go 2022-03-09 19:47:11 -08:00
terence tsao
5d7704e3a9 Update process_block.go 2022-03-09 19:41:03 -08:00
terence tsao
c4093f8adb Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-09 19:36:56 -08:00
terence tsao
4724b8430f Sync with devleop 2022-03-07 12:23:37 -08:00
terence tsao
55c8922f51 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-07 12:20:36 -08:00
Raul Jordan
7f8d66c919 Merge branch 'develop' into kiln 2022-03-07 18:55:37 +00:00
terence tsao
e2e5a0d86c Return early if ttd is not reached 2022-03-04 05:55:30 -08:00
terence tsao
4acc40ffed Update proposer_execution_payload.go 2022-03-03 15:48:28 -08:00
terence tsao
acc528ff75 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-03 15:45:27 -08:00
terence tsao
87395141e8 Bypass eth1 data checks 2022-03-03 09:36:48 -08:00
terence tsao
a69901bd7c Rm post state check 2022-03-02 16:45:03 -08:00
terence tsao
72d2bc7ce1 Sync with develop 2022-03-02 16:32:07 -08:00
terence tsao
aecd34a1ea Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-02 16:22:28 -08:00
terence tsao
1daae0f5cf Update proposer_execution_payload.go 2022-03-01 09:15:41 -08:00
terence tsao
1583c77bdf Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-01 09:10:10 -08:00
terence tsao
8baf2179b3 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-03-01 07:26:05 -08:00
terence tsao
d41947c60d Fix deadlock, uncomment duty opt sync 2022-02-28 20:12:27 -08:00
terence tsao
d8f9ecbd4d Update process_block.go 2022-02-27 14:43:28 -08:00
terence tsao
9da43e4170 refactor engine calls 2022-02-27 14:42:09 -08:00
terence tsao
d3756ea4ea clean ups 2022-02-25 18:41:42 -08:00
Raul Jordan
66418ec0ff Merge branch 'develop' into kiln 2022-02-25 16:52:46 -05:00
terence tsao
e3963094d4 Sync with develop 2022-02-24 20:26:57 -08:00
terence tsao
a5240cf4b8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-24 15:09:08 -08:00
terence tsao
78a90af679 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-23 10:11:49 -08:00
terence tsao
b280e796da Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-22 10:00:50 -08:00
terence tsao
1e32cd5596 Sync with devleop 2022-02-22 07:50:44 -08:00
terence tsao
72c1720704 Merge branch 'kiln' of github.com:prysmaticlabs/prysm into kiln 2022-02-22 07:45:21 -08:00
terence tsao
b6fd9e5315 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-22 07:45:06 -08:00
Nishant Das
fa1509c970 remove kiln flag here (#10269) 2022-02-22 06:48:39 -08:00
terence tsao
f9fbda80c2 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-21 08:18:37 -08:00
terence tsao
9a56a5d101 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-20 10:38:43 -07:00
Raul Jordan
5e8c49c871 Merge branch 'develop' into kiln 2022-02-19 13:22:44 -07:00
terence tsao
4c7daf7a1f Comment out optimistic status 2022-02-19 11:51:05 -07:00
Raul Jordan
c4454cae78 gaz 2022-02-19 11:32:13 -06:00
Raul Jordan
1cedf4ba9a json 2022-02-18 17:43:49 -07:00
Raul Jordan
4ce3da7ecc set string 2022-02-18 17:28:45 -07:00
Raul Jordan
70a6fc4222 marshal un 2022-02-18 17:21:28 -07:00
Raul Jordan
bb126a9829 str 2022-02-18 18:20:34 -06:00
Raul Jordan
99deee57d1 string total diff 2022-02-18 17:19:44 -07:00
terence tsao
4c23401a3b Log 2022-02-16 11:45:47 -08:00
terence tsao
9636fde1eb Sync 2022-02-16 07:51:34 -08:00
terence tsao
a424f523a1 Update json_marshal_unmarshal.go 2022-02-15 20:26:26 -08:00
terence tsao
68e75d5851 Fix marshal 2022-02-15 15:42:39 -08:00
terence tsao
176ea137ee Merge branch 'payload-pointers' into kiln 2022-02-15 14:55:47 -08:00
terence tsao
b15cd763b6 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-15 14:55:00 -08:00
terence tsao
8e78eae897 Update json_marshal_unmarshal.go 2022-02-15 11:19:44 -08:00
terence tsao
f6883f2aa9 Use pointers 2022-02-15 11:04:19 -08:00
terence tsao
19782d2563 Use pointers 2022-02-15 11:00:58 -08:00
terence tsao
032cf433c5 Use pointers 2022-02-15 11:00:21 -08:00
terence tsao
fa656a86a5 Logs to reproduce 2022-02-15 09:38:31 -08:00
terence tsao
5f414b3e82 Optimistic sync: prysm validator rpcs (#10200) 2022-02-15 07:25:02 -08:00
terence tsao
41b8b1a0f8 Merge branch 'kiln2' into kiln 2022-02-15 06:50:17 -08:00
terence tsao
1d36ecb98d Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-15 06:50:03 -08:00
terence tsao
80cd539297 Rm uncommented 2022-02-15 06:48:33 -08:00
terence tsao
f47b6af910 Done 2022-02-14 22:09:29 -08:00
terence tsao
443df77bb3 c 2022-02-14 18:25:59 -08:00
terence tsao
94fe3884a0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-14 08:07:12 -08:00
terence tsao
7d6046276d Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-11 18:02:58 -08:00
terence tsao
4f77ad20c8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kiln 2022-02-10 14:24:28 -08:00
terence tsao
1b5a6d4195 Add back get payload 2022-02-10 14:22:35 -08:00
terence tsao
eae0db383f Clean ups 2022-02-10 12:29:52 -08:00
terence tsao
b56bd9e9d8 Fix build 2022-02-10 12:21:35 -08:00
terence tsao
481d8847c2 Merge branch 'kintsugi' of github.com:prysmaticlabs/prysm into kiln 2022-02-10 08:54:35 -08:00
terence tsao
42d5416658 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-10 08:51:51 -08:00
terence tsao
a1d8833749 Logs and err handling 2022-02-10 08:47:23 -08:00
terence tsao
695389b7bb Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-10 08:04:48 -08:00
Potuz
a67b8610f0 Change optimistic logic (#10194) 2022-02-10 09:59:09 -03:00
terence tsao
29eceba4d2 delete deprecated client, update testnet flag 2022-02-09 16:05:42 -08:00
terence tsao
4c34e5d424 Sync with develop 2022-02-09 15:53:01 -08:00
terence tsao
569375286e Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-09 15:25:28 -08:00
terence tsao
924758a557 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-08 18:28:31 -08:00
terence tsao
eedcb529fd Merge commit '8eaf3919189cd6d5f51904d8e9d74995ab70d4ac' into kintsugi 2022-02-08 07:43:49 -08:00
terence tsao
30e796a4f1 Update proposer.go 2022-02-08 07:29:51 -08:00
terence tsao
ea6ca456e6 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-08 07:29:47 -08:00
terence tsao
4b75b991dd Fix merge transition block validation 2022-02-07 15:11:05 -08:00
Potuz
8eaf391918 allow optimistic sync 2022-02-07 17:35:17 -03:00
terence tsao
cbdb3c9e86 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-02-07 12:02:32 -08:00
Potuz
12754adddc Sync optimistically candidate blocks (#10193) 2022-02-07 07:22:45 -03:00
Potuz
08a5155ee3 Revert "Sync optimistically candidate blocks (#10193)"
This reverts commit f99a0419ef.
2022-02-07 07:20:40 -03:00
Potuz
f99a0419ef Sync optimistically candidate blocks (#10193) 2022-02-07 10:14:25 +00:00
terence tsao
4ad31f9c05 Sync with develop 2022-02-06 19:41:39 -08:00
terence tsao
26876d64d7 Clean ups 2022-02-04 14:20:37 -08:00
terence tsao
3450923661 Sync with develop 2022-02-04 10:08:46 -08:00
terence tsao
aba628b56b Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-28 13:59:31 -08:00
terence tsao
5effb92d11 Update mainnet_config.go 2022-01-27 11:40:45 -08:00
terence tsao
2b55368c99 sync with develop 2022-01-27 11:40:39 -08:00
terence tsao
327903b7bb Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-27 10:35:38 -08:00
terence tsao
77f815a39f Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-24 09:06:44 -08:00
terence tsao
80dc725412 sync with develop 2022-01-14 18:42:45 -08:00
terence tsao
263c18992e Update generate_keys.go 2022-01-13 15:26:54 -08:00
terence tsao
9e220f9052 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-13 15:22:58 -08:00
terence tsao
99878d104c Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-12 10:40:04 -08:00
terence tsao
a870bf7a74 Clean up after sync 2022-01-10 18:50:27 -08:00
terence tsao
dc42ff382f Sync with develop 2022-01-10 11:20:05 -08:00
terence tsao
53b78a38a3 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-10 11:07:23 -08:00
terence tsao
b45826e731 Speed up syncing, hide cosmetic errors 2022-01-04 10:15:40 -08:00
terence tsao
7b59ecac5e Sync with develop, fix payload nil check bug 2022-01-03 07:55:37 -08:00
terence tsao
9149178a9c Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2022-01-03 07:54:56 -08:00
terence tsao
51ef502b04 clean ups 2021-12-23 09:29:26 -08:00
terence tsao
8d891821ee Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-23 08:44:36 -08:00
terence tsao
762863ce6a Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-20 08:20:07 -08:00
terence tsao
41f5fa7524 visibility 2021-12-16 12:41:30 -08:00
terence tsao
09744bac70 correct gossip sizes this time 2021-12-16 11:57:17 -08:00
terence tsao
f5db847237 use merge gossip sizes 2021-12-16 11:15:00 -08:00
terence tsao
8600f70b0b sync with develop 2021-12-16 07:25:02 -08:00
terence tsao
6fe430de44 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-16 07:23:52 -08:00
Zahoor Mohamed
42a5f96d3f ReverseByteOrder function does not mess the input 2021-12-15 22:19:50 +05:30
Mohamed Zahoor
e7f0fcf202 converting base fee to big endian format (#10018) 2021-12-15 06:41:06 -08:00
terence tsao
5ae564f1bf fix conflicts 2021-12-09 09:01:23 +01:00
terence tsao
719109c219 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-09 08:42:02 +01:00
terence tsao
64533a4b0c Merge branch 'kintsugi' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-08 17:27:01 +01:00
terence tsao
9fecd761d7 latest kintusgi execution api 2021-12-08 17:24:45 +01:00
Zahoor Mohamed
f84c95667c change EP field names 2021-12-08 21:52:03 +05:30
terence tsao
9af081797e Go mod tidy 2021-12-06 09:34:54 +01:00
terence tsao
e4e9f12c8b Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-12-06 09:24:49 +01:00
terence tsao
2f4e8beae6 Sync 2021-12-04 15:40:18 +01:00
terence tsao
81c7b90d26 Sync 2021-12-04 15:30:59 +01:00
Potuz
dd3d65ff18 Add v2 endpoint for merge blocks (#9802)
* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-12-01 14:09:28 -03:00
terence tsao
ac5a227aeb Fix transactions root 2021-11-29 13:56:58 -08:00
terence tsao
33f4d5c3cc Fix a bug with loading mainnet state 2021-11-29 09:59:41 -08:00
terence tsao
67d7f8baee State pkg cleanup 2021-11-24 11:29:01 -08:00
terence tsao
3c54aef7b1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-23 15:34:47 -08:00
terence tsao
938c28c42e Fix build 2021-11-23 14:55:31 -08:00
terence tsao
8ddb2c26c4 Merge commit '4858de787558c792b01aae44bc3902859b98fcac' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-23 14:35:39 -08:00
terence tsao
cf0e78c2f6 Handle merge test case for update balance 2021-11-23 09:56:38 -08:00
terence tsao
4c0b262fdc Fix state merge 2021-11-23 09:13:50 -08:00
terence tsao
33e675e204 Update config to devnet1 2021-11-23 08:21:44 -08:00
terence tsao
e599f6a8a1 Fix build 2021-11-22 19:58:00 -08:00
terence tsao
49c9ab9fda Clean up conflicts 2021-11-22 19:40:57 -08:00
terence tsao
f90dec287b Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-22 19:29:07 -08:00
terence tsao
12c36cff9d Update state_trie.go 2021-11-17 08:07:26 -08:00
terence tsao
bc565d9ee6 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-17 08:07:03 -08:00
terence tsao
db67d5bad8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-15 11:07:42 -08:00
terence tsao
3bc0c2be54 Merge branch 'develop' into kintsugi 2021-11-15 09:42:21 -08:00
terence tsao
1bed9ef749 Sync with develop 2021-11-15 09:41:24 -08:00
terence tsao
ec772beeaf Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-15 09:35:29 -08:00
Mohamed Zahoor
56407dde02 Change Gossip message size and Chunk SIze from 1 MB t0 10MB (#9860)
* change gossip size and chunk size after merge

* change ssz to accomodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-15 10:37:02 +05:30
terence tsao
445f17881e Fix bad hex conversion 2021-11-12 11:56:22 -08:00
terence tsao
183d40d8f1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-11 09:36:26 -08:00
terence tsao
87bc6aa5e5 Manually override nil transaction field. M2 works 2021-11-09 16:06:01 -08:00
terence tsao
5b5065b01d Remove unused merge genesis state gen tool 2021-11-09 11:09:59 -08:00
terence tsao
ee1c567561 Remove secp256k1 2021-11-09 08:43:10 -08:00
terence tsao
ff1416c98d Update Kintsugi consensus implementations (#9872) 2021-11-08 21:26:58 -08:00
terence tsao
471c94031f Update spec test shas 2021-11-08 19:39:13 -08:00
terence tsao
9863fb3d6a All spec tests pass 2021-11-08 19:31:28 -08:00
kasey
f3c2d1a00b Kintsugi ssz (#9867) 2021-11-08 18:42:23 -08:00
terence tsao
5d8879a4df Update Kintsugi engine API (#9865) 2021-11-08 09:56:14 -08:00
terence tsao
abea0a11bc Update WORKSPACE 2021-11-05 12:06:19 -07:00
terence tsao
80ce1603bd Merge branch 'kintsugi' of github.com:prysmaticlabs/prysm into kintsugi 2021-11-03 20:40:22 -07:00
terence tsao
ca478244e0 Add and use TBH_ACTIVATION_EPOCH 2021-11-03 20:39:51 -07:00
terence tsao
8a864b66a1 Add and use 2021-11-03 20:38:40 -07:00
terence tsao
72f3b9e84b Remove extraneous p2p condition 2021-11-03 19:17:12 -07:00
terence tsao
493e95060f Fix gossip and tx size limits for the merge part 1 2021-11-03 17:03:06 -07:00
terence tsao
e7e1ecd72f Update penalty params for Merge 2021-11-03 16:37:17 -07:00
terence tsao
c286ac8b87 Remove gas validations 2021-11-03 14:47:33 -07:00
terence tsao
bde315224c Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-11-03 12:52:50 -07:00
terence tsao
00520705bc Sync with develop 2021-11-02 20:52:33 -07:00
Zahoor Mohamed
c7fcd804d7 all gossip tests passing 2021-10-27 18:48:22 +05:30
terence tsao
985ac2e848 Update htrutils.go 2021-10-24 11:35:59 -07:00
terence tsao
f4a0e98926 Disable genesis ETH1.0 chain header logging 2021-10-19 22:13:59 -07:00
terence tsao
5f93ff10ea Merge: switch from go bindings to raw rpc calls (#9803) 2021-10-19 21:00:11 -07:00
terence tsao
544248f60f Go fmt 2021-10-18 22:38:57 -07:00
terence tsao
3b41968510 Merge branch 'merge-oct' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-18 22:38:21 -07:00
terence tsao
7fc418042a Disable deposit contract lookback 2021-10-18 22:38:09 -07:00
terence tsao
9a03946706 Disable contract lookback 2021-10-18 22:34:50 -07:00
terence tsao
33dd6dd5f2 Use proper receive block path for initial syncing 2021-10-18 21:28:16 -07:00
terence tsao
56542e1958 Correctly upgrade to merge state + object mapping fixes 2021-10-18 17:46:55 -07:00
terence tsao
e82d7b4c0b Use uint64 for ttd 2021-10-18 14:00:45 -07:00
terence tsao
6cb69d8ff0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-18 09:26:42 -07:00
terence tsao
70b55a0191 Proper upgrade altair to merge state 2021-10-15 12:48:21 -07:00
terence tsao
50f4951194 Various fixes to pass all spec tests for Merge (#9777) 2021-10-14 15:34:31 -07:00
terence tsao
1a14f2368d Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-14 11:52:28 -07:00
terence tsao
bb8cad58f1 Update beacon_block.pb.go 2021-10-13 13:49:16 -07:00
terence tsao
05412c1f0e Update mainnet_config.go 2021-10-13 13:26:48 -07:00
terence tsao
b03441fed8 Fix finding terminal block hash calculation 2021-10-13 11:29:17 -07:00
terence tsao
fa7d7cef69 Merge: support terminal difficulty override (#9769) 2021-10-12 20:40:01 -07:00
terence tsao
1caa6c969f Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-12 09:43:42 -07:00
Kasey Kirkham
eeb7d5bbfb tell bazel about this new file 2021-10-08 13:31:57 -05:00
Kasey Kirkham
d7c7d150b1 separate ExecutionPayload/Header from codegen 2021-10-08 11:06:21 -05:00
Kasey Kirkham
63c4d2eb2b defensive nil check 2021-10-08 09:18:02 -05:00
Kasey Kirkham
9de1f694a0 restoring generated pb field ordering 2021-10-08 08:16:43 -05:00
terence tsao
8a79d06cbd Fix bazel build //... 2021-10-07 15:31:49 -07:00
terence tsao
5290ad93b8 Merge conflict. Sync with upstream 2021-10-07 15:07:29 -07:00
terence tsao
2128208ef7 M2 works with Geth 🎉 2021-10-07 14:57:20 -07:00
Kasey Kirkham
296323719c get rid of codegen garbage 2021-10-07 16:31:35 -05:00
Kasey Kirkham
5e9583ea85 noisy commit, restoring pb field order codegen 2021-10-07 15:59:28 -05:00
Zahoor Mohamed
17196e0f80 changes test cases per ssz changes 2021-10-08 01:39:30 +05:30
kasey
c50d54000d Merge union debugging (#9751) 2021-10-07 10:44:26 -07:00
terence tsao
85b3061d1b Update go commit 2021-10-07 10:10:46 -07:00
terence tsao
0146c5317a Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-07 09:06:55 -07:00
Zahoor Mohamed
fcbc48ffd9 fix finding Transactions size 2021-10-07 14:16:15 +05:30
terence tsao
76ee51af9d Interop merge beacon state 2021-10-06 17:22:47 -07:00
terence tsao
370b0b97ed Fix beacon chain build 2021-10-06 14:41:43 -07:00
terence tsao
990ebd3fe3 Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-06 14:34:33 -07:00
Zahoor Mohamed
54449c72e8 Merge branch 'merge-oct' of https://github.com/prysmaticlabs/prysm into merge-oct 2021-10-06 23:53:43 +05:30
Zahoor Mohamed
1dbd0b98eb add merge specific checks when receiving a block from gossip 2021-10-06 23:53:24 +05:30
terence tsao
09c3896c6b Go fmt 2021-10-06 09:38:24 -07:00
terence tsao
d494845e19 Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-06 09:36:13 -07:00
terence tsao
4d0c0f7234 Update todo strings 2021-10-05 14:43:01 -07:00
terence tsao
bfe570b1aa Merge branch 'merge-oct-net' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-05 14:41:24 -07:00
terence tsao
56db696823 Clean up and fix a test 2021-10-05 14:38:09 -07:00
terence tsao
d312e15db8 Clean up misc state store 2021-10-05 14:17:44 -07:00
terence tsao
907d4cf7e6 Clean up validator additions 2021-10-05 14:06:03 -07:00
terence tsao
891353d6ad Clean up beacon chain additions 2021-10-05 11:28:36 -07:00
terence tsao
0adc08660c Rest of the validator changes 2021-10-05 10:18:26 -07:00
terence tsao
de31425dcd Add proposer get execution payload helpers 2021-10-04 16:37:22 -07:00
terence tsao
2094e0f21f Update rpc service and proposer get block 2021-10-04 16:37:01 -07:00
terence tsao
2c6f554500 Update process_block.go 2021-10-04 10:45:56 -07:00
terence tsao
18a1e07711 Update and use forked go-ethereum with catalyst go binding 2021-10-04 10:45:39 -07:00
prestonvanloon
5e432f5aaa Use MariusVanDerWijden go-ethereum fork with latest catalyst updates 2021-10-03 22:05:21 -05:00
prestonvanloon
284e2696cb Merge branch 'rm-bazel-go-ethereum' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-03 21:52:13 -05:00
terence tsao
7547aaa6ce Fix build, update comments 2021-10-03 19:11:43 -07:00
prestonvanloon
953315c2cc fix geth e2e flags 2021-10-03 14:01:26 -05:00
terence tsao
9662d06b08 Update catalyst merge commit 2021-10-03 11:51:12 -07:00
prestonvanloon
ecaea26ace fix geth e2e flags 2021-10-03 13:31:52 -05:00
prestonvanloon
63819e2690 move vendor stuff to third_party so that go mod wont be mad anymore 2021-10-03 13:21:27 -05:00
prestonvanloon
a6d0cd06b3 Remove bazel-go-ethereum, use vendored libraries only 2021-10-03 13:11:50 -05:00
prestonvanloon
2dbe4f5e67 viz improvement 2021-10-03 13:11:26 -05:00
prestonvanloon
2689d6814d Add karalabe/usb 2021-10-03 13:11:16 -05:00
prestonvanloon
69a681ddc0 gaz 2021-10-03 12:29:17 -05:00
prestonvanloon
7f9f1fd36c Check in go-ethereum crypto/sepc256k1 package with proper build rules 2021-10-03 12:27:40 -05:00
terence tsao
57c97eb561 Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-10-03 09:42:34 -07:00
terence tsao
f0f94a8193 Handle more version merge cases 2021-10-02 11:43:50 -07:00
Zahoor Mohamed
87b0bf2c2a fix more merge conflicts 2021-10-02 12:27:12 +05:30
Zahoor Mohamed
d8ad317dec fix mrge conflicts 2021-10-02 12:19:29 +05:30
terence tsao
ab5f488cf4 Fix spectest merge fork 2021-10-01 16:27:21 -07:00
terence tsao
296d7464ad Add powchain execution methods 2021-10-01 16:07:33 -07:00
terence tsao
221c542e4f Go mod tidy and build 2021-10-01 13:43:57 -07:00
terence tsao
7ad32aaa96 Add execution caller engine interface 2021-10-01 12:59:04 -07:00
terence tsao
3dc0969c0c Point go-ethereum to https://github.com/ethereum/go-ethereum/pull/23607 2021-10-01 08:30:15 -07:00
Zahoor Mohamed
0e18e835c3 req/resp structure has not changed. so no need of a new version 2021-09-30 21:34:19 +05:30
terence tsao
8adfbfc382 Update sync_committee.go 2021-09-30 08:21:40 -07:00
Zahoor Mohamed
68b0b5e0ce add merge in fork watcher 2021-09-30 14:09:18 +05:30
terence tsao
eede309e0f Fix build 2021-09-29 16:26:13 -07:00
terence tsao
b11628dc53 Can configure flags 2021-09-29 16:04:39 -07:00
terence tsao
ea3ae22d3b Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-09-29 14:25:16 -07:00
terence tsao
02bb39ddeb Minor clean up to improve readability 2021-09-29 10:21:00 -07:00
terence tsao
1618c1f55d Fix comment 2021-09-29 07:57:43 -07:00
terence tsao
73c8493fd7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-09-28 14:54:24 -07:00
terence tsao
a4f59a4f15 Forkchoice and upgrade changes 2021-09-28 14:53:11 -07:00
Zahoor Mohamed
3c497efdb8 Merge branch 'merge-oct' of https://github.com/prysmaticlabs/prysm into merge-oct-net 2021-09-28 21:58:22 +05:30
Zahoor Mohamed
9f5daafbb7 initial networking code 2021-09-28 20:02:47 +05:30
terence tsao
11d7ffdfa8 Add merge spec tests 2021-09-26 11:07:31 -07:00
terence tsao
c26b3305e6 Resolve conflict 2021-09-25 09:49:53 -07:00
terence tsao
38d8b63fbf Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-09-25 09:15:20 -07:00
terence tsao
aea67405c8 Add upgrade to merge path 2021-09-21 14:34:03 -07:00
terence tsao
57d830f8b3 Add wrapper, cloner and interface 2021-09-21 13:34:10 -07:00
terence tsao
ac4b1ef4ea Merge branch 'develop' of github.com:prysmaticlabs/prysm into merge-oct 2021-09-21 13:07:01 -07:00
terence tsao
1d32119f5a can process execution header 2021-09-20 17:08:53 -07:00
terence tsao
3540cc7b05 Add state v3 2021-09-16 21:31:08 -07:00
terence tsao
191e7767a6 Add beacon block and state protos 2021-09-16 16:15:55 -07:00
308 changed files with 1278 additions and 5002 deletions


@@ -217,6 +217,3 @@ build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel


@@ -1 +1 @@
5.0.0
4.2.2


@@ -10,26 +10,26 @@
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true
build --experimental_use_hermetic_linux_sandbox
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc


@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"
@@ -16,8 +16,6 @@ exports_files([
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -127,7 +125,6 @@ nogo(
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],


@@ -117,6 +117,13 @@ http_archive(
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1569509514 +0300",
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -215,7 +222,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.10"
consensus_spec_version = "v1.1.9"
bls_test_version = "v0.1.1"
@@ -231,7 +238,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "28043009cc2f6fc9804e73c8c1fc2cb27062f1591e6884f3015ae1dd7a276883",
sha256 = "207d9c326ba4fa1f34bab7b6169201c32f2611755db030909a3405873445e0ba",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +254,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bc1a283ca068f310f04d70c4f6a8eaa0b8f7e9318073a8bdc2ee233111b4e339",
sha256 = "a3995b39f412db236b2f1db909f288218da53cb53b9923b71dda9d144d68f40a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +270,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bbabb482c229ff9d4e2c7b77c992edb452f9d0af7c6d8dd4f922f06a7b101e81",
sha256 = "76cea7a4c8e32d458ad456b54bfbb30bc772481a91954a4cd97e229aa3023b1d",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -278,7 +285,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "408a5524548ad3fcf387f65ac7ec52781d9ee899499720bb12451b48a15818d4",
sha256 = "0fc429684775f943250dce1f9c485ac25e26c6395d7f585c8d1317becec2ace7",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -7,8 +7,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -325,44 +323,13 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return false, nil
}
return s.IsOptimisticForRoot(ctx, s.head.root)
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as aguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
if err == nil {
return optimistic, nil
}
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return false, err
}
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
if err != nil {
return false, err
}
if ss == nil {
return false, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
return true, nil
}
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return false, nil
}
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
if err != nil {
return false, err
}
return ss.Slot > lastValidated.Slot, nil
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
}
// SetGenesisTime sets the genesis time of beacon chain.
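
The hunk above replaces the state-summary fallback with a single delegation to the fork-choice store for both IsOptimistic and IsOptimisticForRoot. Below is a minimal sketch of the resulting call shape, assuming an illustrative forkChoicer interface and an in-memory fake; the real Prysm interface lives in beacon-chain/forkchoice and differs in detail:

package main

import (
	"context"
	"fmt"
)

// forkChoicer is an illustrative stand-in for the fork-choice store; only the
// method used by the optimistic-status checks above is included.
type forkChoicer interface {
	IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
}

// service mirrors the shape of blockchain.Service for this sketch.
type service struct {
	store    forkChoicer
	headRoot [32]byte
}

// isOptimistic asks the fork-choice store about the current head root,
// matching the simplified IsOptimistic in the diff.
func (s *service) isOptimistic(ctx context.Context) (bool, error) {
	return s.store.IsOptimistic(ctx, s.headRoot)
}

// isOptimisticForRoot does the same for an arbitrary root, with no DB fallback.
func (s *service) isOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
	return s.store.IsOptimistic(ctx, root)
}

// fakeStore is a trivial in-memory implementation used only for this example.
type fakeStore struct{ optimistic map[[32]byte]bool }

func (f *fakeStore) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
	return f.optimistic[root], nil
}

func main() {
	head := [32]byte{'b'}
	s := &service{
		store:    &fakeStore{optimistic: map[[32]byte]bool{head: true}},
		headRoot: head,
	}
	opt, err := s.isOptimistic(context.Background())
	fmt.Println(opt, err) // true <nil>
	opt, err = s.isOptimisticForRoot(context.Background(), [32]byte{'a'})
	fmt.Println(opt, err) // false <nil>
}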


@@ -16,13 +16,10 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
@@ -31,14 +28,11 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
@@ -47,14 +41,11 @@ func TestHeadBlock_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
@@ -62,12 +53,9 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}


@@ -288,11 +288,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
params.BeaconConfig().ZeroHash)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
@@ -302,11 +302,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
@@ -384,8 +384,8 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -400,8 +400,8 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -419,8 +419,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
@@ -430,86 +430,10 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}
func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}
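
The test changes above track an API change in the fork-choice store: InsertOptimisticBlock(ctx, slot, root, parentRoot, justifiedEpoch, finalizedEpoch) becomes ProcessBlock with a trailing boolean marking whether the block is imported optimistically. A hedged sketch of the new call pattern against a toy in-memory store follows; types are simplified to uint64 and the store itself is invented for illustration, not Prysm's implementation:

package main

import (
	"context"
	"fmt"
)

// node is what the toy store records per block root.
type node struct {
	slot       uint64
	parent     [32]byte
	optimistic bool
}

// store is a toy fork-choice store keyed by block root.
type store struct{ nodes map[[32]byte]*node }

// ProcessBlock inserts a block and records its optimistic status, mirroring the
// extra boolean argument exercised by the tests above. justifiedEpoch and
// finalizedEpoch are accepted only to echo the shape of the real signature.
func (s *store) ProcessBlock(_ context.Context, slot uint64, root, parent [32]byte,
	justifiedEpoch, finalizedEpoch uint64, optimistic bool) error {
	if _, ok := s.nodes[root]; ok {
		return fmt.Errorf("duplicate root %x", root)
	}
	s.nodes[root] = &node{slot: slot, parent: parent, optimistic: optimistic}
	return nil
}

// IsOptimistic reports the recorded status for a root.
func (s *store) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
	n, ok := s.nodes[root]
	if !ok {
		return false, fmt.Errorf("unknown root %x", root)
	}
	return n.optimistic, nil
}

func main() {
	ctx := context.Background()
	fc := &store{nodes: map[[32]byte]*node{}}
	// Build a small chain a <- b, importing b optimistically; errors ignored for brevity.
	_ = fc.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false)
	_ = fc.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true)
	opt, _ := fc.IsOptimistic(ctx, [32]byte{'b'})
	fmt.Println(opt) // true
}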


@@ -1,4 +1,3 @@
//go:build !develop
// +build !develop
package blockchain


@@ -9,8 +9,4 @@ var (
errNilBestJustifiedInStore = errors.New("nil best justified checkpoint returned from store")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
)


@@ -83,7 +83,8 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
} else {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
// TODO(10261) send optimistic status
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
return err
}
}


@@ -6,7 +6,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
@@ -18,7 +17,7 @@ import (
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b block.BeaconBlock) error {
func logStateTransitionData(b block.BeaconBlock) {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -35,23 +34,13 @@ func logStateTransitionData(b block.BeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
if b.Version() == version.Altair {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
if err == nil {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
p, err := b.Body().ExecutionPayload()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
log = log.WithField("txCount", len(p.Transactions))
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {


@@ -3,7 +3,6 @@ package blockchain
import (
"testing"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -12,17 +11,6 @@ import (
)
func Test_logStateTransitionData(t *testing.T) {
payloadBlk := &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
SyncAggregate: &ethpb.SyncAggregate{},
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: []byte{1, 2, 3},
Transactions: [][]byte{{}, {}},
},
},
}
wrappedPayloadBlk, err := wrapper.WrappedBeaconBlock(payloadBlk)
require.NoError(t, err)
tests := []struct {
name string
b block.BeaconBlock
@@ -67,15 +55,11 @@ func Test_logStateTransitionData(t *testing.T) {
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: wrappedPayloadBlk,
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
require.NoError(t, logStateTransitionData(tt.b))
logStateTransitionData(tt.b)
require.LogsContain(t, hook, tt.want)
})
}


@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
}
ctx := context.Background()
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0)) // genesis
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0)) // finalized
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0)) // justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0)) // best justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0)) // bad
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, false)) // genesis
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, false)) // finalized
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false)) // justified
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, false)) // best justified
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, false)) // bad
type args struct {
slot types.Slot


@@ -21,7 +21,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
}
@@ -76,9 +76,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
return payloadID, nil
}
@@ -122,9 +119,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, hea
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
return s.validateMergeBlock(ctx, blk)
return nil
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(header, body)
if err != nil {
@@ -145,38 +140,14 @@ func isPreBellatrix(v int) bool {
//
// Spec pseudocode definition:
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
// if is_execution_block(opt_store.blocks[block.parent_root]):
// return True
//
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// if is_execution_block(opt_store.blocks[justified_root]):
// return True
//
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
// return True
//
// return False
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
// block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
// return justified_is_execution_block or block_is_deep
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return true, nil
}
parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return false, err
}
if parent == nil {
return false, errNilParentInDB
}
parentIsExecutionBlock, err := blocks.ExecutionBlock(parent.Block().Body())
if err != nil {
return false, err
}
if parentIsExecutionBlock {
return true, nil
}
j := s.store.JustifiedCheckpt()
if j == nil {
return false, errNilJustifiedInStore
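
The rewritten pseudocode above reduces the optimistic-candidate check to two conditions: the justified block already carries an execution payload, or the block sits at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY slots behind the current slot. A minimal sketch of that rule follows; the function, parameter names, and the example constant value are illustrative rather than Prysm's own:

package main

import "fmt"

// safeSlotsToImportOptimistically stands in for SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY;
// 128 is only an example value here.
const safeSlotsToImportOptimistically = 128

// isOptimisticCandidate applies the simplified rule from the spec comment above:
// import optimistically if the justified block is an execution block, or if the
// block is deep enough behind the wall-clock slot.
func isOptimisticCandidate(blockSlot, currentSlot uint64, justifiedIsExecutionBlock bool) bool {
	blockIsDeep := blockSlot+safeSlotsToImportOptimistically <= currentSlot
	return justifiedIsExecutionBlock || blockIsDeep
}

func main() {
	// A shallow block with a pre-merge justified checkpoint is rejected,
	fmt.Println(isOptimisticCandidate(200, 201, false)) // false
	// the same block qualifies once the justified block has an execution payload,
	fmt.Println(isOptimisticCandidate(200, 201, true)) // true
	// and a sufficiently deep block qualifies regardless.
	fmt.Println(isOptimisticCandidate(100, 300, false)) // true
}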


@@ -36,6 +36,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
@@ -44,7 +45,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0))
tests := []struct {
name string
@@ -155,7 +155,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
service.cfg.ExecutionEngineCaller = engine
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -188,13 +188,6 @@ func Test_NotifyNewPayload(t *testing.T) {
},
},
}
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
@@ -245,29 +238,10 @@ func Test_NotifyNewPayload(t *testing.T) {
errString: "could not validate execution payload from execution engine: payload status is INVALID",
},
{
name: "altair pre state, altair block",
name: "altair pre state",
postState: bellatrixState,
preState: altairState,
blk: altairBlk,
},
{
name: "altair pre state, happy case",
postState: bellatrixState,
preState: altairState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
blk: bellatrixBlk,
},
{
name: "could not get merge block",
@@ -330,29 +304,27 @@ func Test_NotifyNewPayload(t *testing.T) {
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
})
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
}
}
@@ -375,12 +347,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedBellatrixSignedBeaconBlock(parentBlk)
require.NoError(t, err)
parentRoot, err := wrappedParentBlock.Block().HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
@@ -392,7 +358,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -400,7 +365,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -412,7 +376,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -420,7 +383,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -432,7 +394,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -440,7 +401,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -452,7 +412,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -460,7 +419,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
@@ -485,59 +443,8 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
Root: jroot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
require.NoError(t, err)
require.Equal(t, tt.want, candidate, tt.name)
}
}
func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
payload := &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
BlockHash: make([]byte, 32),
BlockNumber: 100,
}
body := &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
block := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: block}
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
blkRoot, err := wr.Block().HashTreeRoot()
require.NoError(t, err)
childBlock := util.NewBeaconBlockBellatrix()
childBlock.Block.ParentRoot = blkRoot[:]
// shallow block
childBlock.Block.Slot = 201
wrappedChild, err := wrapper.WrappedBellatrixSignedBeaconBlock(childBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
candidate, err := service.optimisticCandidateBlock(ctx, wrappedChild.Block())
require.NoError(t, err)
require.Equal(t, true, candidate)
}

View File

@@ -256,7 +256,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -282,7 +282,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -571,8 +571,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)

View File

@@ -110,11 +110,13 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, postState, signed); err != nil {
return errors.Wrap(err, "could not verify new payload")
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
// TODO(10261) Check optimistic status
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /*optimistic sync*/); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
return err
}
// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(
@@ -182,7 +184,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.updateHead(ctx, balances); err != nil {
log.WithError(err).Warn("Could not update head")
}
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
@@ -273,32 +275,33 @@ func getStateVersionAndPayload(preState state.BeaconState) (int, *ethpb.Executio
}
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, []bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, errors.New("no blocks provided")
return nil, nil, nil, errors.New("no blocks provided")
}
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
b := blks[0].Block()
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if preState == nil || preState.IsNil() {
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
return nil, nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
}
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
optimistic := make([]bool, len(blks))
sigSet := &bls.SignatureBatch{
Signatures: [][]byte{},
PublicKeys: []bls.PublicKey{},
@@ -309,64 +312,65 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
for i, b := range blks {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, preState, b); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
return nil, nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
}
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
sigSet.Join(set)
}
verify, err := sigSet.Verify()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if !verify {
return nil, nil, errors.New("batch block signature verification failed")
return nil, nil, nil, errors.New("batch block signature verification failed")
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
}
// Also saves the last post state, to be used as the pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return fCheckpoints, jCheckpoints, nil
return fCheckpoints, jCheckpoints, optimistic, nil
}
// handles a block after its batch has been verified, at which point we can save blocks and
// their state summaries and split them off to the relevant hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
b := signed.Block()
s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, b, blockRoot, bytesutil.ToBytes32(fCheckpoint.Root)); err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, b, bytesutil.ToBytes32(fCheckpoint.Root)); err != nil {
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],
@@ -451,13 +455,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block and the block's attestations into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState) error {
st state.BeaconState, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
@@ -476,20 +480,24 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
// TODO(10261): check whether the blocks are optimistic when filling fork choice
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
return s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch)
fCheckpoint.Epoch, optimistic); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
return nil
}
// This saves post state info to the DB or cache, and also to the fork choice store.
// Post state info consists of the processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -500,7 +508,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
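For orientation, the hunks above thread a new optimistic flag from savePostStateInfo through insertBlockAndAttestationsToForkChoiceStore and insertBlockToForkChoiceStore into ForkChoiceStore.ProcessBlock. The Go sketch below illustrates that call chain only; the wrapper function name is hypothetical and the snippet assumes the package's existing imports rather than being the real implementation.

// sketchInsert is a hypothetical helper showing how the optimistic status flows
// down to the fork choice store after this change. It mirrors, not replaces,
// the real savePostStateInfo / insertBlockToForkChoiceStore path.
func sketchInsert(ctx context.Context, s *Service, b block.SignedBeaconBlock, root [32]byte, st state.BeaconState, optimistic bool) error {
	// Persist the block and state, forwarding the optimistic status to fork choice.
	if err := s.savePostStateInfo(ctx, root, b, st, false /* reg sync */, optimistic); err != nil {
		return err
	}
	// Blocks whose execution payload has already been verified are marked valid right away.
	if !optimistic {
		return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
	}
	return nil
}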

View File

@@ -366,13 +366,14 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
}
return nil
}

View File

@@ -295,7 +295,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
_, _, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -351,56 +351,6 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(params.BeaconConfig().ZeroHash[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
st, keys := util.DeterministicGenesisState(t, 64)
bState := st.Copy()
var blks []block.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
blkCount := 4
for i := 1; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
require.Equal(t, blkCount-1, len(cp1))
require.Equal(t, blkCount-1, len(cp2))
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -1085,7 +1035,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -1130,7 +1080,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)

View File

@@ -161,36 +161,14 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
prevHead := s.headRoot()
if err := s.updateHead(s.ctx, balances); err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.notifyEngineIfChangedHead(prevHead)
}
}
}()
}
// This calls notifyForkchoiceUpdate in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
if s.headRoot() == prevHead {
return
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
}
// This processes fork choice attestations from the pool to account for validator votes in fork choice.
func (s *Service) processAttestations(ctx context.Context) {
atts := s.cfg.AttPool.ForkchoiceAttestations()

View File

@@ -111,44 +111,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}
func TestNotifyEngineIfChangedHead(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.notifyEngineIfChangedHead(service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
service.notifyEngineIfChangedHead([32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
hook.Reset()
b := util.NewBeaconBlock()
b.Block.Slot = 1
wr := wrapper.WrappedPhase0SignedBeaconBlock(b)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
service.head = &head{
slot: 1,
root: r,
block: wr,
}
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead([32]byte{'b'})
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
}

View File

@@ -64,9 +64,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBloc
return err
}
// Log state transition data.
if err := logStateTransitionData(blockCopy.Block()); err != nil {
return err
}
logStateTransitionData(blockCopy.Block())
return nil
}
@@ -79,7 +77,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
defer span.End()
// Apply the state transition on the newly received blockCopy without verifying its BLS contents.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
fCheckpoints, jCheckpoints, optimistic, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -89,10 +87,20 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
// TODO(10261) check optimistic status
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i], false /*optimistic status*/); err != nil {
tracing.AnnotateError(span, err)
return err
}
if !optimistic[i] {
root, err := b.Block().HashTreeRoot()
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
return err
}
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
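Read together, the ReceiveBlockBatch hunks above amount to the per-block flow sketched below. This is a condensed restatement of the diff, not the verbatim implementation: the optimistic slice comes from onBlockBatch, and handleBlockAfterBatchVerify still receives a literal false pending TODO(10261).

// Condensed sketch of the new batch handling: every block is inserted, and only
// blocks reported as non-optimistic are immediately marked valid in fork choice.
fCheckpoints, jCheckpoints, optimistic, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
	return errors.Wrap(err, "could not process block in batch")
}
for i, b := range blocks {
	if err := s.handleBlockAfterBatchVerify(ctx, b.Copy(), blkRoots[i], fCheckpoints[i], jCheckpoints[i], false /* optimistic status, see TODO(10261) */); err != nil {
		return err
	}
	if !optimistic[i] {
		root, err := b.Block().HashTreeRoot()
		if err != nil {
			return err
		}
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
			return err
		}
	}
}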

View File

@@ -127,7 +127,7 @@ func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
if err := s.startFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
@@ -164,9 +164,9 @@ func (s *Service) Status() error {
return nil
}
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
func (s *Service) startFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -449,17 +449,14 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch); err != nil {
genesisCheckpoint.Epoch, false /* optimistic status */); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
}
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil

View File

@@ -290,7 +290,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
@@ -333,7 +333,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
@@ -393,7 +393,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
@@ -460,7 +460,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -479,7 +479,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -547,7 +547,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -568,7 +568,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -18,12 +17,10 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
)
const (
// maxBalanceCacheSize defines the max number of active balance entries the cache can hold.
maxBalanceCacheSize = int(4)
)
var (
// maxBalanceCacheSize defines the max number of active balance entries the cache can hold.
maxBalanceCacheSize = uint64(4)
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",
@@ -45,7 +42,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(maxBalanceCacheSize),
cache: lruwrpr.New(int(maxBalanceCacheSize)),
}
}

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -20,13 +19,11 @@ import (
mathutil "github.com/prysmaticlabs/prysm/math"
)
const (
var (
// maxCommitteesCacheSize defines the max number of shuffled committees the cache can hold, on a per-RANDAO basis.
// Due to reorgs and long finality, it's good to keep the old cache around to switch over quickly.
maxCommitteesCacheSize = int(32)
)
maxCommitteesCacheSize = uint64(32)
var (
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "committee_cache_miss",
@@ -58,7 +55,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
inProgress: make(map[string]bool),
}
}

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass global committee caches.

View File

@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}

View File

@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
}
k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, len(k))
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass proposer indices caches.

View File

@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// A given node can calculate committee assignments for the current and next epochs.
// The max size is therefore set to two epochs' worth of committees.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
if ok := trie.VerifyMerkleProofWithDepth(
receiptRoot,
leaf[:],
beaconState.Eth1DepositIndex(),
int(beaconState.Eth1DepositIndex()),
deposit.Proof,
params.BeaconConfig().DepositContractTreeDepth,
); !ok {

View File

@@ -17,15 +17,13 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["slot_epoch_test.go"],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],

View File

@@ -46,9 +46,9 @@ func NextEpoch(state state.ReadOnlyBeaconState) types.Epoch {
return slots.ToEpoch(state.Slot()) + 1
}
// HigherEqualThanAltairVersionAndEpoch returns true if the input state `s` has a version at or above Altair and the input epoch `e` is greater than or equal to the fork epoch.
func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e types.Epoch) bool {
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
// AltairCompatible returns true if the input state `s` is Altair compatible and the input epoch `e` is greater than or equal to the fork epoch.
func AltairCompatible(s state.BeaconState, e types.Epoch) bool {
return s.Version() == version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
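Assuming, as the hunk ordering suggests, that the renamed AltairCompatible with the equality check is the resulting form of this helper, a minimal usage sketch follows. The test-state helper is the one already used elsewhere in this compare, and the snippet is illustrative only.

// Sketch: with the equality check, only Altair-version states at or past the
// Altair fork epoch satisfy the helper.
st, _ := util.DeterministicGenesisStateAltair(t, 1) // hypothetical Altair test state
ok := AltairCompatible(st, params.BeaconConfig().AltairForkEpoch)
require.Equal(t, true, ok)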

View File

@@ -1,17 +1,14 @@
package time_test
package time
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -45,7 +42,7 @@ func TestCurrentEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
}
}
@@ -61,7 +58,7 @@ func TestPrevEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
}
}
@@ -79,7 +76,7 @@ func TestNextEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.NextEpoch(state), "NextEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, NextEpoch(state), "NextEpoch(%d)", state.Slot())
}
}
@@ -111,7 +108,7 @@ func TestCanUpgradeToAltair(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
if got := CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})
@@ -146,7 +143,7 @@ func TestCanUpgradeBellatrix(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
if got := CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})
@@ -181,85 +178,6 @@ func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
b := &eth.BeaconState{Slot: tt.slot}
s, err := v1.InitializeFromProto(b)
require.NoError(t, err)
assert.Equal(t, tt.canProcessEpoch, time.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}
func TestAltairCompatible(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
type args struct {
s state.BeaconState
e types.Epoch
}
tests := []struct {
name string
args args
want bool
}{
{
name: "phase0 state",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisState(t, 1)
return st
}(),
},
want: false,
},
{
name: "altair state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateAltair(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, bellatrix epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().BellatrixForkEpoch,
},
want: true,
},
{
name: "bellatrix state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, phase0 epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.HigherEqualThanAltairVersionAndEpoch(tt.args.s, tt.args.e); got != tt.want {
t.Errorf("HigherEqualThanAltairVersionAndEpoch() = %v, want %v", got, tt.want)
}
})
assert.Equal(t, tt.canProcessEpoch, CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}

View File

@@ -245,7 +245,7 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState,
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),

View File

@@ -92,7 +92,7 @@ func ExecuteStateTransition(
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
// The block must have a higher slot than parent state.
if state.Slot() >= slot {
@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
defer span.End()
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
if state == nil || state.IsNil() {
return nil, errors.New("nil state")

View File

@@ -45,13 +45,12 @@ type ReadOnlyDatabase interface {
HasArchivedPoint(ctx context.Context, slot types.Slot) bool
LastArchivedRoot(ctx context.Context) [32]byte
LastArchivedSlot(ctx context.Context) (types.Slot, error)
LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee recipient operations.
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
}
@@ -75,7 +74,6 @@ type NoHeadAccessDatabase interface {
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.
@@ -83,7 +81,7 @@ type NoHeadAccessDatabase interface {
// Run any required database migrations.
RunMigrations(ctx context.Context) error
// Fee recipient operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, addrs []common.Address) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}

View File

@@ -25,7 +25,6 @@ go_library(
"state_summary.go",
"state_summary_cache.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
@@ -92,7 +91,6 @@ go_test(
"state_summary_test.go",
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],

View File

@@ -395,13 +395,13 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error) {
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FeeRecipientByValidatorID")
defer span.End()
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(id))
if addr == nil {
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
}
@@ -412,7 +412,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, feeRecipients []common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()
@@ -423,7 +423,7 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
for i, id := range ids {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), feeRecipients[i].Bytes()); err != nil {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(id), feeRecipients[i].Bytes()); err != nil {
return err
}
}
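With the signature shown above now taking a plain uint64 validator id, a caller might look like the sketch below. The store setup and addresses are illustrative only and mirror the test diff that follows this file.

// Sketch: saving and reading back fee recipients with the uint64-keyed API.
ids := []uint64{0, 1, 2}
feeRecipients := []common.Address{{'a'}, {'b'}, {'c'}}
if err := db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients); err != nil {
	return err
}
addr, err := db.FeeRecipientByValidatorID(ctx, 1) // expected to return common.Address{'b'}
if err != nil {
	return err
}
_ = addr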

View File

@@ -595,11 +595,11 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
func TestStore_FeeRecipientByValidatorID(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
ids := []types.ValidatorIndex{0, 0, 0}
ids := []uint64{0, 0, 0}
feeRecipients := []common.Address{{}, {}, {}, {}}
require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
ids = []types.ValidatorIndex{0, 1, 2}
ids = []uint64{0, 1, 2}
feeRecipients = []common.Address{{'a'}, {'b'}, {'c'}}
require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
f, err := db.FeeRecipientByValidatorID(ctx, 0)

View File

@@ -175,6 +175,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
validatedTips,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,

View File

@@ -19,6 +19,7 @@ var (
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
validatedTips = []byte("validated-synced-tips")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -38,13 +39,12 @@ var (
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
// Specific item keys.
headBlockRootKey = []byte("head-root")
genesisBlockRootKey = []byte("genesis-root")
depositContractAddressKey = []byte("deposit-contract")
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
headBlockRootKey = []byte("head-root")
genesisBlockRootKey = []byte("genesis-root")
depositContractAddressKey = []byte("deposit-contract")
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.

View File

@@ -1,49 +0,0 @@
package kv
import (
"context"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// LastValidatedCheckpoint returns the latest fully validated checkpoint in beacon chain.
func (s *Store) LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastValidatedCheckpoint")
defer span.End()
var checkpoint *ethpb.Checkpoint
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(checkpointBucket)
enc := bkt.Get(lastValidatedCheckpointKey)
if enc == nil {
var finErr error
checkpoint, finErr = s.FinalizedCheckpoint(ctx)
return finErr
}
checkpoint = &ethpb.Checkpoint{}
return decode(ctx, enc, checkpoint)
})
return checkpoint, err
}
// SaveLastValidatedCheckpoint saves the last validated checkpoint in beacon chain.
func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastValidatedCheckpoint")
defer span.End()
enc, err := encode(ctx, checkpoint)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
return errMissingStateForCheckpoint
}
return bucket.Put(lastValidatedCheckpointKey, enc)
})
}

View File

@@ -1,67 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/protobuf/proto"
)
func TestStore_LastValidatedCheckpoint_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
root := bytesutil.ToBytes32([]byte{'A'})
cp := &ethpb.Checkpoint{
Epoch: 10,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, cp))
retrieved, err := db.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}
func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
genesis := bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis))
blk := util.NewBeaconBlock()
blk.Block.ParentRoot = genesis[:]
blk.Block.Slot = 40
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 5,
Root: root[:],
}
// a valid chain is required to save finalized checkpoint.
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
retrieved, err := db.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}

View File

@@ -2,7 +2,7 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")

View File

@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
// With starting justified epoch at 0, the head should be 3:
// 0 <- start
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
// Right branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
// With start at 0, the head should be 10:
// 0 <-- start
@@ -185,7 +185,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
ctx := context.Background()
f := New(justifiedEpoch, finalizedEpoch)
err := f.InsertOptimisticBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch)
err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch, false)
if err != nil {
return nil
}

View File

@@ -102,17 +102,17 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
processedAttestationCount.Inc()
}
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
// ProcessBlock processes a new block by inserting it into the fork choice store.
func (f *ForkChoice) ProcessBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool,
) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertOptimisticBlock")
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -174,7 +174,7 @@ func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false, ErrNilNode
return false, errNilNode
}
return node.optimistic, nil
@@ -190,7 +190,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return nil, ErrNilNode
return nil, errNilNode
}
n := node
@@ -202,7 +202,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}
if n == nil {
return nil, ErrNilNode
return nil, errNilNode
}
return n.root[:], nil
@@ -237,7 +237,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if nextNode == nil {
return ErrNilNode
return errNilNode
}
nextNode.balance += newBalance
}
@@ -246,7 +246,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if currentNode == nil {
return ErrNilNode
return errNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
@@ -279,7 +279,7 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return ErrNilNode
return errNilNode
}
return node.setNodeAndParentValidated(ctx)
}
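The doubly-linked-tree store now takes the optimistic status at insertion time. The sketch below mirrors the setup helper in the tests above; it is not part of the package, and the child root is arbitrary.

// Sketch: insert a genesis node, add a child optimistically, then flip it to
// valid once its execution payload has been verified.
f := New(0 /* justified epoch */, 0 /* finalized epoch */)
if err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, 0, 0, false); err != nil {
	return err
}
child := [32]byte{'a'}
if err := f.ProcessBlock(ctx, 1, child, params.BeaconConfig().ZeroHash, 0, 0, true /* optimistic */); err != nil {
	return err
}
// IsOptimistic reports true for the child until SetOptimisticToValid is called.
if err := f.SetOptimisticToValid(ctx, child); err != nil {
	return err
}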

View File

@@ -15,9 +15,9 @@ import (
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
@@ -37,9 +37,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
@@ -61,12 +61,12 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1, false))
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
@@ -80,12 +80,12 @@ func TestForkChoice_IsCanonical(t *testing.T) {
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1, false))
f.store.nodesLock.Lock()
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
@@ -114,9 +114,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
func TestForkChoice_AncestorRoot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1, false))
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
@@ -125,7 +125,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
assert.Equal(t, bytesutil.ToBytes32(r), indexToHash(3))
_, err = f.AncestorRoot(ctx, indexToHash(3), 0)
assert.ErrorContains(t, ErrNilNode.Error(), err)
assert.ErrorContains(t, errNilNode.Error(), err)
root, err := f.AncestorRoot(ctx, indexToHash(3), 5)
require.NoError(t, err)
@@ -140,8 +140,8 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
require.NoError(t, err)
@@ -152,8 +152,8 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
require.NoError(t, err)


@@ -48,10 +48,10 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
optimisticCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_optimistic_count",
Help: "The number of blocks that have been optimistically synced.",
},
)
)


@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")


@@ -55,7 +55,7 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return ErrNilNode
return errNilNode
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err
@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
}
n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}
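Only the tail of setNodeAndParentValidated is visible above; a sketch of the presumed full walk, assuming it stops at the tree root and at ancestors that are already validated (both guards are assumptions, and the metric increment touched by this hunk is omitted):

// Sketch only: guard conditions are assumed, not shown in the diff.
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if !n.optimistic {
		// A validated node implies validated ancestors; nothing to do.
		return nil
	}
	n.optimistic = false
	if n.parent == nil {
		return nil
	}
	return n.parent.setNodeAndParentValidated(ctx)
}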


@@ -13,9 +13,9 @@ import (
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balances of each node is 100
s := f.store
@@ -36,9 +36,9 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balances of each node is 100
s := f.store
@@ -63,7 +63,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3, false))
// Verify parent's best child and best descendant are `none`.
s := f.store
@@ -76,7 +76,7 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
@@ -87,8 +87,8 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
@@ -103,8 +103,8 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
@@ -119,9 +119,9 @@ func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
@@ -151,11 +151,11 @@ func TestNode_ViableForHead(t *testing.T) {
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3, false))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
@@ -172,13 +172,11 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1, true))
opt, err := f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)


@@ -12,7 +12,7 @@ func (s *Store) removeNode(ctx context.Context, root [32]byte) error {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return ErrNilNode
return errNilNode
}
if !node.optimistic || node.parent == nil {
return errInvalidOptimisticStatus


@@ -51,18 +51,18 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
require.NoError(t, f.store.removeNode(context.Background(), tc.root))
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())


@@ -41,13 +41,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -64,13 +65,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -89,13 +91,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -114,13 +117,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -183,13 +187,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -199,13 +204,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -247,13 +253,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -265,13 +272,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -320,13 +328,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
true,
),
)
@@ -343,13 +352,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
true,
),
)
@@ -366,13 +376,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
true,
),
)
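Each hunk in this test repeats the same insert-block / process-attestation / recompute-head steps; a condensed sketch of that pattern, assuming zero epochs and a placeholder balances slice (the real values come from earlier, unshown test setup):

jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
f := setup(jEpoch, fEpoch)
ctx := context.Background()
balances := []uint64{10, 10, 10, 10} // placeholder validator balances

// 1. Insert the proposed block, here flagged as optimistically synced.
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, jEpoch, fEpoch, true))
// 2. Account an attestation for the new block.
f.ProcessAttestation(ctx, []uint64{0}, indexToHash(1), fEpoch)
// 3. Recompute the head from the justified checkpoint.
head, err := f.Head(ctx, jEpoch, params.BeaconConfig().ZeroHash, balances, fEpoch)
require.NoError(t, err)
require.Equal(t, indexToHash(1), head)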


@@ -97,7 +97,7 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
@@ -117,7 +117,7 @@ func (s *Store) insert(ctx context.Context,
parent: parent,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
optimistic: true,
optimistic: optimistic,
}
s.nodeByRoot[root] = n
@@ -128,6 +128,14 @@ func (s *Store) insert(ctx context.Context,
}
}
if !optimistic {
if err := n.setNodeAndParentValidated(ctx); err != nil {
return err
}
} else {
optimisticCount.Inc()
}
// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
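This is the behavioural change the updated tests lean on: inserting with optimistic=false validates the node (and, via setNodeAndParentValidated, its still-optimistic ancestors) immediately, while optimistic=true leaves it optimistic until SetOptimisticToValid is called. A hedged illustration inside a test, with placeholder roots:

// rootA, rootB, rootC are placeholder [32]byte roots for a chain a <- b <- c.
require.NoError(t, f.ProcessBlock(ctx, 1, rootA, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, rootB, rootA, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, rootC, rootB, 1, 1, true))

opt, err := f.IsOptimistic(ctx, rootC)
require.NoError(t, err)
require.Equal(t, true, opt) // c was inserted optimistically

// Once c's payload is validated, the flag is cleared for c and any
// still-optimistic ancestors.
require.NoError(t, f.SetOptimisticToValid(ctx, rootC))
opt, err = f.IsOptimistic(ctx, rootC)
require.NoError(t, err)
require.Equal(t, false, opt)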


@@ -33,14 +33,14 @@ func TestStore_FinalizedEpoch(t *testing.T) {
func TestStore_NodeCount(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, 2, f.NodeCount())
}
func TestStore_NodeByRoot(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
@@ -61,7 +61,7 @@ func TestStore_NodeByRoot(t *testing.T) {
func TestForkChoice_HasNode(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, true, f.HasNode(indexToHash(1)))
}
@@ -74,7 +74,7 @@ func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
func TestStore_Head_Itself(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
// Since the justified node does not have a best descendant so the best node
// is itself.
@@ -85,10 +85,10 @@ func TestStore_Head_Itself(t *testing.T) {
func TestStore_Head_BestDescendant(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0, false))
h, err := f.store.head(context.Background(), indexToHash(1))
require.NoError(t, err)
require.Equal(t, h, indexToHash(4))
@@ -97,9 +97,9 @@ func TestStore_Head_BestDescendant(t *testing.T) {
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
cancel()
err := f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0)
err := f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false)
require.ErrorContains(t, "context canceled", err)
}
@@ -108,7 +108,7 @@ func TestStore_Insert(t *testing.T) {
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1))
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1, false))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
@@ -132,9 +132,9 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
s := f.store
@@ -151,9 +151,9 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
s := f.store
@@ -169,9 +169,9 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
s := f.store
@@ -196,8 +196,8 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
f.store.pruneThreshold = 0
s := f.store
@@ -221,18 +221,18 @@ func TestStore_tips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
expectedMap := map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'i'}: 106,
@@ -250,8 +250,8 @@ func TestStore_tips(t *testing.T) {
func TestStore_PruneMapsNodes(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
s := f.store
s.pruneThreshold = 0
@@ -263,9 +263,9 @@ func TestStore_PruneMapsNodes(t *testing.T) {
func TestStore_HasParent(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, false))
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
require.Equal(t, true, f.HasParent(indexToHash(1)))
require.Equal(t, true, f.HasParent(indexToHash(2)))


@@ -22,7 +22,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -32,7 +32,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -62,7 +62,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -98,7 +98,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 3
// |
// 4 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -114,7 +114,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -130,7 +130,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
// Moved 2 votes to block 5:
// 0
@@ -142,7 +142,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -163,9 +163,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -210,7 +210,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
// |
// head-> 11
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)


@@ -29,13 +29,13 @@ type HeadRetriever interface {
// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
InsertOptimisticBlock(ctx context.Context,
ProcessBlock(ctx context.Context,
slot types.Slot,
root [32]byte,
parentRoot [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
) error
optimisticStatus bool) error
}
// AttestationProcessor processes the attestation that's used for accounting fork choice.
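Any service that talks to fork choice through this interface has to adopt the widened signature as well; a minimal sketch of such a caller, where processForkChoiceBlock is a hypothetical wrapper name, not something from this compare:

// Hypothetical helper: forwards the optimistic status to fork choice.
func processForkChoiceBlock(ctx context.Context, fc BlockProcessor,
	slot types.Slot, root, parentRoot [32]byte,
	justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
	return fc.ProcessBlock(ctx, slot, root, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
}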


@@ -21,7 +21,6 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",


@@ -5,7 +5,7 @@ import "errors"
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var ErrUnknownNodeRoot = errors.New("unknown block root")
var errUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
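With ErrUnknownNodeRoot unexported (and ErrNilNode likewise in the doubly-linked-tree package), the sentinels can only be matched inside their own packages; the updated tests below keep doing so with require.ErrorIs. A short sketch of that in-package pattern, using a placeholder root:

unknownRoot := [32]byte{'z'} // placeholder root that was never inserted
_, err := f.IsOptimistic(ctx, unknownRoot)
// require.ErrorIs wraps errors.Is; the sentinel stays private to the package.
require.ErrorIs(t, errUnknownNodeRoot, err)

External callers would presumably check behaviour through exported helpers such as HasNode rather than comparing against the sentinel.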


@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
// With starting justified epoch at 0, the head should be 3:
// 0 <- start
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
// Right branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
// With start at 0, the head should be 10:
// 0 <-- start


@@ -4,7 +4,6 @@ import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)
@@ -47,27 +46,19 @@ func computeDeltas(
if ok {
// Protection against out of bound, the `nextDeltaIndex` which defines
// the block location in the dag can not exceed the total `delta` length.
if nextDeltaIndex >= uint64(len(deltas)) {
if int(nextDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(newBalance)
if err != nil {
return nil, nil, err
}
deltas[nextDeltaIndex] += delta
deltas[nextDeltaIndex] += int(newBalance)
}
currentDeltaIndex, ok := blockIndices[vote.currentRoot]
if ok {
// Protection against out of bound (same as above)
if currentDeltaIndex >= uint64(len(deltas)) {
if int(currentDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(oldBalance)
if err != nil {
return nil, nil, err
}
deltas[currentDeltaIndex] -= delta
deltas[currentDeltaIndex] -= int(oldBalance)
}
}
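computeDeltas now casts the uint64 balances straight to int instead of going through the checked pmath.Int conversion, and the index bound check is rewritten on the int side. A sketch of the resulting update step; the overflow remark is an observation, not something stated in the compare:

// deltas is []int; newBalance is a uint64 gwei balance.
if int(nextDeltaIndex) >= len(deltas) {
	return nil, nil, errInvalidNodeDelta
}
// Plain cast: fine where int is 64 bits; the removed pmath.Int call appears
// to have been a checked conversion that surfaced overflow as an error.
deltas[nextDeltaIndex] += int(newBalance)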


@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")


@@ -40,7 +40,7 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
index, ok := f.store.nodesIndices[root]
if !ok {
f.store.nodesLock.RUnlock()
return false, ErrUnknownNodeRoot
return false, errUnknownNodeRoot
}
node := f.store.nodes[index]
slot := node.slot


@@ -147,10 +147,10 @@ func TestOptimistic(t *testing.T) {
// We test first nodes outside the Fork Choice store
_, err := f.IsOptimistic(ctx, root0)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
require.ErrorIs(t, errUnknownNodeRoot, err)
_, err = f.IsOptimistic(ctx, root1)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
require.ErrorIs(t, errUnknownNodeRoot, err)
// We check all nodes in the Fork Choice store.
op, err := f.IsOptimistic(ctx, nodeA.root)
@@ -220,18 +220,18 @@ func TestSetOptimisticToValid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
tests := []struct {
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
@@ -409,18 +409,18 @@ func TestSetOptimisticToInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
@@ -467,18 +467,18 @@ func TestFindSyncedTip(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
tests := []struct {
root [32]byte // the root of the block
tips map[[32]byte]types.Slot // the synced tips
@@ -548,45 +548,3 @@ func TestFindSyncedTip(t *testing.T) {
syncedTips.RUnlock()
}
}
// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
}
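For context, the hang this test guards against is a leaked read lock on the store's sync.RWMutex: if IsOptimistic returned without releasing the nodes lock's read side, the nodesLock.Lock() calls above would block forever. A minimal, self-contained sketch of that failure mode (illustrative only, not Prysm code):

package main

import "sync"

func main() {
	var mu sync.RWMutex
	mu.RLock() // a reader that never calls RUnlock ...
	done := make(chan struct{})
	go func() {
		mu.Lock() // ... blocks any later writer forever
		defer mu.Unlock()
		close(done)
	}()
	<-done // never reached; the runtime reports "all goroutines are asleep - deadlock!"
}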

View File

@@ -41,13 +41,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -64,13 +65,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -89,13 +91,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -114,13 +117,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -180,13 +184,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -196,13 +201,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)
@@ -244,13 +250,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)
@@ -262,13 +269,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)
@@ -317,13 +325,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
false,
),
)
@@ -340,13 +349,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
false,
),
)
@@ -363,13 +373,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
false,
),
)

View File

@@ -9,7 +9,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
@@ -144,16 +143,23 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertOptimisticBlock")
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
if err := f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
if !optimistic {
return f.SetOptimisticToValid(ctx, blockRoot)
}
return nil
}
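A hedged usage sketch of the new signature, written in the style of the tests elsewhere in this diff (the setup helper, ctx, t and the require package are assumed from those tests): optimistic=false inserts the block and immediately marks it valid, while optimistic=true leaves it optimistic until SetOptimisticToValid is called for its root.

f := setup(1, 1)
// optimistic == false: insert and mark the block fully validated right away.
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, false))
// optimistic == true: insert only; the block stays optimistic until
// SetOptimisticToValid(ctx, [32]byte{'b'}) is called for it.
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))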
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -440,11 +446,7 @@ func (s *Store) applyWeightChanges(
s.proposerBoostLock.Unlock()
return err
}
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
return err
}
nodeDelta = nodeDelta + iProposerScore
nodeDelta = nodeDelta + int(proposerScore)
}
s.proposerBoostLock.Unlock()

View File

@@ -491,18 +491,18 @@ func TestStore_PruneSyncedTips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,

View File

@@ -23,7 +23,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -33,7 +33,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -63,7 +63,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -99,7 +99,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 3
// |
// 4 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -115,7 +115,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -131,7 +131,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
// Moved 2 votes to block 5:
// 0
@@ -143,7 +143,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -164,9 +164,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -211,7 +211,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -290,7 +290,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
// |
// head-> 11
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)

View File

@@ -1,8 +1,6 @@
package node
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/cmd"
@@ -37,7 +35,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
params.OverrideBeaconConfig(c)
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations))
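// With mainnet presets (32 slots per epoch, 128 attestations per block) this
// works out to 32 * 128 = 4096, matching the limit mentioned in the comment above.
// (Preset values assumed here; they are not part of this diff.)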
cmd.Init(cmdConfig)
log.Warnf(
"Setting %d slots per archive point and %d max RPC page size for historical slasher usage. This requires additional storage",
@@ -107,17 +105,10 @@ func configureInteropConfig(cliCtx *cli.Context) {
}
}
func configureExecutionSetting(cliCtx *cli.Context) error {
if !cliCtx.IsSet(flags.FeeRecipient.Name) {
return nil
func configureExecutionSetting(cliCtx *cli.Context) {
if cliCtx.IsSet(flags.FeeRecipient.Name) {
c := params.BeaconConfig()
c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
params.OverrideBeaconConfig(c)
}
c := params.BeaconConfig()
ha := cliCtx.String(flags.FeeRecipient.Name)
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
params.OverrideBeaconConfig(c)
return nil
}

View File

@@ -93,20 +93,9 @@ func TestConfigureExecutionSetting(t *testing.T) {
set.String(flags.FeeRecipient.Name, "", "")
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
cliCtx := cli.NewContext(&app, set, nil)
err := configureExecutionSetting(cliCtx)
require.ErrorContains(t, "0xB is not a valid fee recipient address", err)
require.NoError(t, set.Set(flags.FeeRecipient.Name, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
configureExecutionSetting(cliCtx)
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -120,9 +120,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
configureEth1Config(cliCtx)
configureNetwork(cliCtx)
configureInteropConfig(cliCtx)
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureExecutionSetting(cliCtx)
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()

View File

@@ -104,8 +104,8 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(att.Data.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(att.Data.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -160,8 +160,8 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(sMsg.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(sMsg.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -213,9 +213,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
if span.IsRecordingEvents() {
id := hash.FastSum64(buf.Bytes())
messageLen := int64(buf.Len())
// lint:ignore uintcast -- It's safe to do this for tracing.
iid := int64(id)
span.AddMessageSendEvent(iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
}
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
err := errors.Wrap(err, "could not publish message")

View File

@@ -14,7 +14,6 @@ go_library(
],
deps = [
"//config/params:go_default_library",
"//math:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_golang_snappy//:go_default_library",

View File

@@ -3,6 +3,7 @@ package encoder
import (
"fmt"
"io"
"math"
"sync"
fastssz "github.com/ferranbt/fastssz"
@@ -10,7 +11,6 @@ import (
"github.com/golang/snappy"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
)
var _ NetworkEncoding = (*SszNetworkEncoder)(nil)
@@ -145,11 +145,11 @@ func (_ SszNetworkEncoder) ProtocolSuffix() string {
// MaxLength specifies the maximum possible length of an encoded
// chunk of data.
func (_ SszNetworkEncoder) MaxLength(length uint64) (int, error) {
il, err := math.Int(length)
if err != nil {
return 0, errors.Wrap(err, "invalid length provided")
// Defensive check: reject lengths that would overflow when cast to a signed integer.
if length > math.MaxInt64 {
return 0, errors.Errorf("invalid length provided: %d", length)
}
maxLen := snappy.MaxEncodedLen(il)
maxLen := snappy.MaxEncodedLen(int(length))
if maxLen < 0 {
return 0, errors.Errorf("max encoded length is negative: %d", maxLen)
}
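The guard above matters because converting a uint64 larger than MaxInt64 to a signed integer wraps around; a standalone illustration (not Prysm code):

package main

import (
	"fmt"
	"math"
)

func main() {
	length := uint64(math.MaxInt64) + 1 // 2^63, exactly the kind of value the check rejects
	fmt.Println(int64(length))          // prints -9223372036854775808
}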

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/network/forks"
)
@@ -73,8 +72,8 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsub_pb.Message) string {
// the topic byte string, and the raw message data: i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20].
func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
topic := *pmsg.Topic
topicLen := len(topic)
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(uint64(topicLen)) // topicLen cannot be negative
topicLen := uint64(len(topic))
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
// Beyond the Bellatrix epoch, allow a 10 MiB gossip data size.
gossipPubSubSize := params.BeaconNetworkConfig().GossipMaxSize
@@ -84,25 +83,7 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
if err != nil {
totalLength, err := math.AddInt(
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
len(topicLenBytes),
topicLen,
len(pmsg.Data),
)
if err != nil {
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
if uint64(totalLength) > gossipPubSubSize {
// this should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
totalLength := len(params.BeaconNetworkConfig().MessageDomainInvalidSnappy) + len(topicLenBytes) + int(topicLen) + len(pmsg.Data)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)
@@ -111,19 +92,7 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
h := hash.Hash(combinedData)
return string(h[:20])
}
totalLength, err := math.AddInt(
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
len(topicLenBytes),
topicLen,
len(decodedData),
)
if err != nil {
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
totalLength := len(params.BeaconNetworkConfig().MessageDomainValidSnappy) + len(topicLenBytes) + int(topicLen) + len(decodedData)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainValidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)

View File

@@ -2,10 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"status.go",
],
srcs = ["status.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
@@ -14,7 +11,6 @@ go_library(
"//config/features:go_default_library",
"//config/params:go_default_library",
"//crypto/rand:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//time:go_default_library",
@@ -26,7 +22,6 @@ go_library(
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,5 +0,0 @@
package peers
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "peers")

View File

@@ -40,7 +40,6 @@ import (
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/rand"
pmath "github.com/prysmaticlabs/prysm/math"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
prysmTime "github.com/prysmaticlabs/prysm/time"
@@ -750,12 +749,12 @@ func (p *Status) PeersToPrune() []peer.ID {
return p.deprecatedPeersToPrune()
}
connLimit := p.ConnectedPeerLimit()
inBoundLimit := uint64(p.InboundLimit())
inBoundLimit := p.InboundLimit()
activePeers := p.Active()
numInboundPeers := uint64(len(p.InboundConnected()))
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
if len(activePeers) <= int(connLimit) {
return []peer.ID{}
}
p.store.Lock()
@@ -785,15 +784,10 @@ func (p *Status) PeersToPrune() []peer.ID {
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen.
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
amountToPrune := len(activePeers) - int(connLimit)
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
excessInbound := 0
if numInboundPeers > inBoundLimit {
excessInbound = numInboundPeers - inBoundLimit
}
@@ -802,7 +796,7 @@ func (p *Status) PeersToPrune() []peer.ID {
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
if amountToPrune < len(peersToPrune) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
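A worked example of the pruning arithmetic above, with illustrative numbers that are not taken from this diff:

package main

import "fmt"

func main() {
	// Suppose connLimit = 5 with 9 active peers, and an inbound limit of 3 with 6 inbound peers.
	amountToPrune := 9 - 5 // excess over the connection limit: 4
	excessInbound := 6 - 3 // excess over the inbound limit: 3
	if excessInbound > amountToPrune {
		amountToPrune = excessInbound
	}
	fmt.Println(amountToPrune) // 4, later capped at len(peersToPrune) if fewer scored candidates exist
}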
@@ -821,7 +815,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
if len(activePeers) <= int(connLimit) {
return []peer.ID{}
}
p.store.Lock()
@@ -851,23 +845,18 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
amountToPrune := len(activePeers) - int(connLimit)
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
excessInbound := 0
if numInboundPeers > inBoundLimit {
excessInbound = uint64(numInboundPeers - inBoundLimit)
excessInbound = numInboundPeers - inBoundLimit
}
// Prune the largest amount between excess peers and
// excess inbound peers.
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
if amountToPrune < len(peersToPrune) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))

View File

@@ -40,7 +40,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.
span.AddAttributes(trace.Int64Attribute("index", int64(index)))
if s.dv5Listener == nil {
// return if discovery isn't set
@@ -138,7 +138,7 @@ func (s *Service) hasPeerWithSubnet(topic string) bool {
// In the event the configured peer threshold is lower, we choose the lower
// threshold.
minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers)
}
// Updates the service's discv5 listener record's attestation subnet
@@ -195,7 +195,6 @@ func attSubnets(record *enr.Record) ([]uint64, error) {
if err != nil {
return nil, err
}
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(attestationSubnetCount)) {
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}
@@ -215,7 +214,6 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
if err != nil {
return nil, err
}
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(syncCommsSubnetCount)) {
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}

View File

@@ -122,7 +122,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
}
// Initialize a pointer into the eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorNum := big.NewInt(int64(latestBlkHeight))
cursorTime := latestBlkTime
numOfBlocks := uint64(0)
@@ -168,9 +168,9 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findLessTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
}
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMoreTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
@@ -214,7 +214,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
}
func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
bn := big.NewInt(0).SetUint64(bNum)
bn := big.NewInt(int64(bNum))
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
if err != nil {
return nil, err

View File

@@ -596,7 +596,7 @@ func fixtures() map[string]interface{} {
foo := bytesutil.ToBytes32([]byte("foo"))
bar := bytesutil.PadTo([]byte("bar"), 20)
baz := bytesutil.PadTo([]byte("baz"), 256)
baseFeePerGas := big.NewInt(12345)
baseFeePerGas := big.NewInt(6)
executionPayloadFixture := &pb.ExecutionPayload{
ParentHash: foo[:],
FeeRecipient: bar,

View File

@@ -109,7 +109,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
// The same log can sometimes be received twice from the
// ETH1.0 network; this check prevents us from updating our trie
// with the same log twice, which would cause an inconsistent state root.
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex)) // lint:ignore uintcast -- MerkleTreeIndex should not exceed int64 in your lifetime.
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex))
if index <= s.lastReceivedMerkleIndex {
return nil
}
@@ -211,7 +211,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()
chainStartTime := time.Unix(int64(genesisTime), 0) // lint:ignore uintcast -- Genesis time won't exceed int64 in your lifetime.
chainStartTime := time.Unix(int64(genesisTime), 0)
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
@@ -303,8 +303,8 @@ func (s *Service) processPastLogs(ctx context.Context) error {
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
FromBlock: big.NewInt(int64(start)),
ToBlock: big.NewInt(int64(end)),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -312,7 +312,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
withinLimit := remainingLogs < depositlogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
query.ToBlock = big.NewInt(int64(latestFollowHeight))
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -391,7 +391,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
}
if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 {
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex())) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex()))
}
return nil
}
@@ -413,11 +413,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}

View File

@@ -613,10 +613,8 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// accumulates. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
@@ -664,7 +662,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
err := error(nil)
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Args: []interface{}{hexutil.EncodeBig(big.NewInt(int64(i))), false},
Result: header,
Error: err,
})
@@ -1090,7 +1088,7 @@ func dedupEndpoints(endpoints []string) []string {
func eth1HeadIsBehind(timestamp uint64) bool {
timeout := prysmTime.Now().Add(-eth1Threshold)
// check that web3 client is syncing
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
return time.Unix(int64(timestamp), 0).Before(timeout)
}
func (s *Service) primaryConnected() bool {

View File

@@ -17,29 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconProposer expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapFeeRecipientsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*feeRecipientsRequestJSON); !ok {
return true, nil
}
recipients := make([]*feeRecipientJson, 0)
if err := json.NewDecoder(req.Body).Decode(&recipients); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &feeRecipientsRequestJSON{Recipients: recipients}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = ioutil.NopCloser(bytes.NewReader(b))
return true, nil
}
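// For reference, the wrapping performed above (field encodings follow the struct
// tags defined elsewhere in this diff; values are illustrative):
//   posted body:  [{"validator_index": 1, "fee_recipient": "0x0000000000000000000000000000000000000000"}]
//   wrapped body: {"recipients": [{"validator_index": 1, "fee_recipient": "0x0000000000000000000000000000000000000000"}]}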
// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
@@ -204,14 +181,9 @@ type altairPublishBlockRequestJson struct {
Signature string `json:"signature" hex:"true"`
}
type bellatrixPublishBlockRequestJson struct {
BellatrixBlock *beaconBlockBellatrixJson `json:"bellatrix_block"`
Signature string `json:"signature" hex:"true"`
}
// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
// We don't know which version of the block got posted, but we can determine it from the slot.
// We know that blocks of all versions have a Message field with a Slot field,
// We know that both Phase 0 and Altair blocks have a Message field with a Slot field,
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
@@ -235,20 +207,17 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
}
currentEpoch := slots.ToEpoch(types.Slot(slot))
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
if slots.ToEpoch(types.Slot(slot)) < params.BeaconConfig().AltairForkEpoch {
endpoint.PostRequest = &signedBeaconBlockContainerJson{}
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
} else {
endpoint.PostRequest = &signedBeaconBlockBellatrixContainerJson{}
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
return true, nil
}
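As a concrete illustration of the slot-based dispatch above (mainnet preset values assumed, not taken from this diff): with SLOTS_PER_EPOCH = 32 and ALTAIR_FORK_EPOCH = 74240, the first Altair slot is 74240 * 32 = 2375680, so a posted slot below 2375680 deserializes into signedBeaconBlockContainerJson and anything at or above it into signedBeaconBlockAltairContainerJson.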
// In preparePublishedBlock we transform the PostRequest.
// gRPC expects an XXX_block field in the JSON object, but we have a message field at this point.
// gRPC expects either a phase0_block or an altair_block field in the JSON object, but we have a message field at this point.
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
@@ -270,15 +239,6 @@ func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWrit
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*signedBeaconBlockBellatrixContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &bellatrixPublishBlockRequestJson{
BellatrixBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}
@@ -435,11 +395,6 @@ type altairProduceBlockResponseJson struct {
Data *beaconBlockAltairJson `json:"data"`
}
type bellatrixProduceBlockResponseJson struct {
Version string `json:"version"`
Data *beaconBlockBellatrixJson `json:"data"`
}
func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*produceBlockResponseV2Json)
if !ok {
@@ -457,11 +412,6 @@ func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, [
Version: respContainer.Version,
Data: respContainer.Data.AltairBlock,
}
} else if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())) {
actualRespContainer = &bellatrixProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.BellatrixBlock,
}
} else {
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}

View File

@@ -363,11 +363,6 @@ func TestWrapSignedContributionAndProofsArray(t *testing.T) {
}
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
params.OverrideBeaconConfig(cfg)
endpoint := &apimiddleware.Endpoint{}
s := struct {
Message struct {
@@ -402,21 +397,6 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Bellatrix", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
j, err := json.Marshal(s)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
}
func TestPreparePublishedBlock(t *testing.T) {
@@ -448,20 +428,6 @@ func TestPreparePublishedBlock(t *testing.T) {
assert.Equal(t, true, ok)
})
t.Run("Bellatrix", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &signedBeaconBlockBellatrixContainerJson{
Message: &beaconBlockBellatrixJson{
Body: &beaconBlockBodyBellatrixJson{},
},
},
}
errJson := preparePublishedBlock(endpoint, nil, nil)
require.Equal(t, true, errJson == nil)
_, ok := endpoint.PostRequest.(*bellatrixPublishBlockRequestJson)
assert.Equal(t, true, ok)
})
t.Run("unsupported block type", func(t *testing.T) {
errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
@@ -505,7 +471,8 @@ func TestSerializeV2Block(t *testing.T) {
StateRoot: "root",
Body: &beaconBlockBodyJson{},
},
Signature: "sig",
AltairBlock: nil,
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
@@ -528,6 +495,7 @@ func TestSerializeV2Block(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &signedBeaconBlockContainerV2Json{
Phase0Block: nil,
AltairBlock: &beaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
@@ -554,36 +522,6 @@ func TestSerializeV2Block(t *testing.T) {
require.NotNil(t, beaconBlock.Body)
})
t.Run("Bellatrix", func(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &signedBeaconBlockContainerV2Json{
BellatrixBlock: &beaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &beaconBlockBodyBellatrixJson{},
},
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &bellatrixBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data.Message)
beaconBlock := resp.Data.Message
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeV2Block(response)
@@ -671,7 +609,7 @@ func TestSerializeV2State(t *testing.T) {
})
}
func TestSerializeProducedV2Block(t *testing.T) {
func TestSerializeProduceV2Block(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
response := &produceBlockResponseV2Json{
Version: ethpbv2.Version_PHASE0.String(),
@@ -683,6 +621,7 @@ func TestSerializeProducedV2Block(t *testing.T) {
StateRoot: "root",
Body: &beaconBlockBodyJson{},
},
AltairBlock: nil,
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
@@ -705,6 +644,7 @@ func TestSerializeProducedV2Block(t *testing.T) {
response := &produceBlockResponseV2Json{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &beaconBlockContainerV2Json{
Phase0Block: nil,
AltairBlock: &beaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
@@ -730,35 +670,6 @@ func TestSerializeProducedV2Block(t *testing.T) {
require.NotNil(t, beaconBlock.Body)
})
t.Run("Bellatrix", func(t *testing.T) {
response := &produceBlockResponseV2Json{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &beaconBlockContainerV2Json{
BellatrixBlock: &beaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &beaconBlockBodyBellatrixJson{},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &bellatrixProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeProducedV2Block(response)

View File

@@ -63,7 +63,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/aggregate_and_proofs",
"/eth/v1/validator/sync_committee_contribution",
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
}
}
@@ -239,11 +238,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedContributionAndProofsArray,
}
case "/eth/v1/validator/prepare_beacon_proposer":
endpoint.PostRequest = &feeRecipientsRequestJSON{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
}
default:
return nil, errors.New("invalid path")
}

View File

@@ -19,11 +19,6 @@ type genesisResponse_GenesisJson struct {
GenesisForkVersion string `json:"genesis_fork_version" hex:"true"`
}
// feeRecipientsRequestJSON is used in the /validator/prepare_beacon_proposer API endpoint.
type feeRecipientsRequestJSON struct {
Recipients []*feeRecipientJson `json:"recipients"`
}
// stateRootResponseJson is used in /beacon/states/{state_id}/root API endpoint.
type stateRootResponseJson struct {
Data *stateRootResponse_StateRootJson `json:"data"`
@@ -476,11 +471,6 @@ type indexedAttestationJson struct {
Signature string `json:"signature" hex:"true"`
}
type feeRecipientJson struct {
ValidatorIndex uint64 `json:"validator_index"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
}
type attestationJson struct {
AggregationBits string `json:"aggregation_bits" hex:"true"`
Data *attestationDataJson `json:"data"`

View File

@@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) {
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = "73"
config.DefaultFeeRecipient = common.HexToAddress("DefaultFeeRecipient")
config.FeeRecipient = common.HexToAddress("FeeRecipient")
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "DefaultFeeRecipient":
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
case "FeeRecipient":
assert.Equal(t, common.HexToAddress("FeeRecipient"), v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
assert.Equal(t, "3", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":

View File

@@ -61,20 +61,6 @@ func TestGetBeaconStateV2(t *testing.T) {
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
})
t.Run("Bellatrix", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
}
func TestGetBeaconStateSSZ(t *testing.T) {
@@ -133,24 +119,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
})
t.Run("Bellatrix", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
sszState, err := fakeState.MarshalSSZ()
require.NoError(t, err)
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
}
resp, err := server.GetBeaconStateSSZV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
})
}

View File

@@ -30,8 +30,6 @@ go_library(
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -63,7 +61,6 @@ go_test(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
@@ -74,7 +71,6 @@ go_test(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
@@ -84,7 +80,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -3,13 +3,10 @@ package validator
import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
@@ -255,11 +252,6 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
block, err := vs.v1BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block: %v", err)
@@ -271,9 +263,18 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
epoch := slots.ToEpoch(req.Slot)
if epoch < params.BeaconConfig().AltairForkEpoch {
block, err := vs.v1BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_Phase0Block{Phase0Block: block},
},
}, nil
}
v1alpha1req := &ethpbalpha.BlockRequest{
@@ -286,71 +287,27 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_Phase0Block{Phase0Block: block},
},
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_AltairBlock{AltairBlock: block},
},
}, nil
if !ok {
return nil, status.Errorf(codes.Internal, "Could not get Altair block: %v", err)
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_BellatrixBlock{BellatrixBlock: block},
},
}, nil
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_AltairBlock{AltairBlock: block},
},
}, nil
}
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
// PrepareBeaconProposer --
func (vs *Server) PrepareBeaconProposer(
ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,
_ context.Context, _ *ethpbv1.PrepareBeaconProposerRequest,
) (*emptypb.Empty, error) {
_, span := trace.StartSpan(ctx, "validator.PrepareBeaconProposer")
defer span.End()
var feeRecipients []common.Address
var validatorIndices []types.ValidatorIndex
for _, recipientContainer := range request.Recipients {
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
}
feeRecipients = append(feeRecipients, common.BytesToAddress(recipientContainer.FeeRecipient))
validatorIndices = append(validatorIndices, recipientContainer.ValidatorIndex)
}
if err := vs.V1Alpha1Server.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, validatorIndices, feeRecipients); err != nil {
return nil, status.Errorf(codes.Internal, "Could not save fee recipients: %v", err)
}
log.WithFields(log.Fields{
"validatorIndices": validatorIndices,
}).Info("Updated fee recipient addresses for validator indices")
return &emptypb.Empty{}, nil
return &emptypb.Empty{}, status.Error(codes.Unimplemented, "Unimplemented")
}
// ProduceAttestationData requests that the beacon node produces attestation data for

View File

@@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
@@ -25,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
p2pmock "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
v1alpha1validator "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/testutil"
@@ -36,7 +34,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
@@ -613,7 +610,6 @@ func TestProduceBlock(t *testing.T) {
}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: v1Alpha1Server,
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
@@ -645,17 +641,6 @@ func TestProduceBlock(t *testing.T) {
assert.DeepEqual(t, expectedAttSlashings, resp.Data.Body.AttesterSlashings)
}
func TestProduceBlock_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
}
_, err := vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceBlockV2(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
db := dbutil.SetupDB(t)
@@ -716,7 +701,6 @@ func TestProduceBlockV2(t *testing.T) {
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
require.NoError(t, err)
@@ -850,7 +834,6 @@ func TestProduceBlockV2(t *testing.T) {
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
require.NoError(t, err)
@@ -891,166 +874,6 @@ func TestProduceBlockV2(t *testing.T) {
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})
t.Run("Bellatrix", func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.AltairForkEpoch = types.Epoch(0)
bc.BellatrixForkEpoch = types.Epoch(1)
params.OverrideBeaconConfig(bc)
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
stateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesisBlock := util.NewBeaconBlockBellatrix()
genesisBlock.Block.StateRoot = stateRoot[:]
wrappedBellatrixBlock, err := wrapper.WrappedBellatrixSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wrappedBellatrixBlock))
parentRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mocks.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
beaconState,
privKeys[i],
i,
)
require.NoError(t, err)
proposerSlashings[i] = proposerSlashing
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
require.NoError(t, err)
}
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
beaconState,
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
)
require.NoError(t, err)
attSlashings[i] = attesterSlashing
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
require.NoError(t, err)
}
aggregationBits := bitfield.NewBitvector128()
for i := range aggregationBits {
aggregationBits[i] = 0xAA
}
syncCommitteeIndices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
sigs := make([]bls.Signature, 0, len(syncCommitteeIndices))
for i, indice := range syncCommitteeIndices {
if aggregationBits.BitAt(uint64(i)) {
b := p2pType.SSZBytes(parentRoot[:])
sb, err := signing.ComputeDomainAndSign(beaconState, coreTime.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs = append(sigs, sig)
}
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
contribution := &ethpbalpha.SyncCommitteeContribution{
Slot: params.BeaconConfig().SlotsPerEpoch,
BlockRoot: parentRoot[:],
SubcommitteeIndex: 0,
AggregationBits: aggregationBits,
Signature: aggregatedSig,
}
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
require.NoError(t, err)
graffiti := bytesutil.ToBytes32([]byte("eth2"))
req := &ethpbv1.ProduceBlockRequest{
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
RandaoReveal: randaoReveal,
Graffiti: graffiti[:],
}
resp, err := v1Server.ProduceBlockV2(ctx, req)
require.NoError(t, err)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_BellatrixBlock)
require.Equal(t, true, ok)
blk := containerBlock.BellatrixBlock
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have the requested slot")
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(blk.Body.ProposerSlashings)))
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
for i, slash := range proposerSlashings {
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
}
assert.DeepEqual(t, expectedPropSlashings, blk.Body.ProposerSlashings)
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(blk.Body.AttesterSlashings)))
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
for i, slash := range attSlashings {
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
}
assert.DeepEqual(t, expectedAttSlashings, blk.Body.AttesterSlashings)
expectedBits := bitfield.NewBitvector512()
for i := 0; i <= 15; i++ {
expectedBits[i] = 0xAA
}
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})
}
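The sync-aggregate expectation at the end of the Bellatrix case follows from how a single subcommittee contribution maps into the 512-bit aggregate: subcommittee 0 covers bits 0..127, so filling the contribution's 16 bytes with 0xAA is mirrored by 0xAA in the first 16 bytes of the Bitvector512. A standalone sketch of that mapping (illustrative, not Prysm code):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Contribution bits for one subcommittee: 0xAA per byte sets every odd bit.
	contribution := bitfield.NewBitvector128()
	for i := range contribution {
		contribution[i] = 0xAA
	}

	// The full sync aggregate is 512 bits; subcommittee 0 occupies bits 0..127,
	// i.e. bytes 0..15, which is why the test fills exactly those bytes with 0xAA.
	aggregate := bitfield.NewBitvector512()
	copy(aggregate[:16], contribution)

	fmt.Println(contribution.BitAt(0), contribution.BitAt(1)) // false true
	fmt.Println(aggregate.BitAt(127), aggregate.BitAt(128))   // true false
}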
func TestProduceBlockV2_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
}
_, err := vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceAttestationData(t *testing.T) {
@@ -2012,65 +1835,3 @@ func TestSubmitContributionAndProofs(t *testing.T) {
require.DeepEqual(t, expectedContributions, savedMsgs)
})
}
func TestPrepareBeaconProposer(t *testing.T) {
type args struct {
request *ethpbv1.PrepareBeaconProposerRequest
}
tests := []struct {
name string
args args
wantErr string
}{
{
name: "Happy Path",
args: args{
request: &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
ValidatorIndex: 1,
},
},
},
},
wantErr: "",
},
{
name: "invalid fee recipient length",
args: args{
request: &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{
FeeRecipient: make([]byte, fieldparams.BLSPubkeyLength),
ValidatorIndex: 1,
},
},
},
},
wantErr: "Invalid fee recipient address",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BeaconDB: db,
}
server := &Server{
V1Alpha1Server: v1Server,
}
_, err := server.PrepareBeaconProposer(ctx, tt.args.request)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
}
require.NoError(t, err)
address, err := server.V1Alpha1Server.BeaconDB.FeeRecipientByValidatorID(ctx, 1)
require.NoError(t, err)
require.Equal(t, common.BytesToAddress(tt.args.request.Recipients[0].FeeRecipient), address)
})
}
}
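The table-driven test above exercises the full RPC path; the underlying storage round trip is the pair of BeaconDB methods seen at the two ends of this diff, SaveFeeRecipientsByValidatorIDs and FeeRecipientByValidatorID. A minimal round-trip sketch, assuming the same imports already present in this test file (illustrative, not part of the diff):

func TestFeeRecipientRoundTrip(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()

	indices := []types.ValidatorIndex{1}
	recipients := []common.Address{common.HexToAddress("0x0100000000000000000000000000000000000000")}

	// Save through the same DB method the PrepareBeaconProposer handler calls.
	require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, indices, recipients))

	// Read back through the accessor the test above asserts against.
	got, err := db.FeeRecipientByValidatorID(ctx, 1)
	require.NoError(t, err)
	require.Equal(t, recipients[0], got)
}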

Some files were not shown because too many files have changed in this diff.