Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00.

Compare commits: applystate...fusaka-dev (317 commits)
.github/workflows/changelog.yml (2 changes, vendored)
@@ -18,7 +18,7 @@ jobs:
       uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
       with:
         repo: OffchainLabs/unclog
-        version: "tags/v0.1.3"
+        version: "tags/v0.1.5"
         file: "unclog"

     - name: Get new changelog files
@@ -194,6 +194,7 @@ nogo(
     "//tools/analyzers/gocognit:go_default_library",
     "//tools/analyzers/ineffassign:go_default_library",
     "//tools/analyzers/interfacechecker:go_default_library",
+    "//tools/analyzers/logcapitalization:go_default_library",
     "//tools/analyzers/logruswitherror:go_default_library",
     "//tools/analyzers/maligned:go_default_library",
     "//tools/analyzers/nop:go_default_library",
CHANGELOG.md (145 changes)
@@ -4,6 +4,151 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v6.0.4](https://github.com/prysmaticlabs/prysm/compare/v6.0.3...v6.0.4) - 2025-06-05

This release has more work on PeerDAS and light client support, plus a few bug fixes:
- The blob cache size is now correctly set at startup.
- A fix for slashing protection history exports where the validator database was in a nested folder.
- Corrected behavior of the API call for state committees with an invalid request.
- `/bin/sh` is now symlinked to `/bin/bash` in Prysm docker images.

In the [Hoodi](https://github.com/eth-clients/hoodi) testnet, the default gas limit is raised to 60M gas.

### Added

- Add light client mainnet spec test. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15295)
- Add support for light client req/resp domain. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15281)
- Added /bin/sh symlink to docker images. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15294)
- Added Prysm build data to otel tracing spans. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15302)
- Add light client minimal spec test support for `update_ranking` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15297)
- Add Fulu operation and epoch processing spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15284)
- Updated e2e Beacon API evaluator to support more endpoints, including the ones introduced in Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15304)
- Data column sidecars verification methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15232)
- Implement data column sidecars filesystem. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15257)
- Add blob schedule support from https://github.com/ethereum/consensus-specs/pull/4277. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15272)
- Add random forkchoice spec tests for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15287)
- Add ability to download nightly test vectors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15312)
- PeerDAS: Validation pipeline for data column sidecars received via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15310)
- PeerDAS: Implement P2P. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15347)
- PeerDAS: Implement the blockchain package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15350)

### Changed

- Update spec tests to v1.6.0-alpha.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15306)
- PeerDAS: Refactor the reconstruction pipeline. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15309)
- PeerDAS: `DataColumnStorage.Get` - Exit early when no columns are available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15309)
- Default Hoodi testnet builder gas limit to 60M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15361)

### Fixed

- Fix cyclical dependency issue when using the testing/util package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15248)
- Set seen blob cache size correctly based on current slot time at startup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15348)
- Fix `slashing-protection-history export` failing when `validator.db` is in a nested folder like `data/direct/`. (#14954) [[PR]](https://github.com/prysmaticlabs/prysm/pull/15351)
- Made the `/eth/v1/beacon/states/{state_id}/committees` endpoint return `400` when the slot does not belong to the specified epoch, aligning with the Beacon API spec. (#15355) [[PR]](https://github.com/prysmaticlabs/prysm/pull/15356)
- Removed eager validator context cancellation that was occasionally causing validator builder registrations to fail. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15369)
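As a concrete illustration of the committees fix above, here is a minimal, hypothetical probe. The endpoint and its `epoch`/`slot` query parameters come from the Beacon API spec, and the port assumes Prysm's default REST gateway on 3500; the specific values (slot 33 vs. epoch 0) are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Slot 33 falls in epoch 1 on mainnet (32 slots per epoch), so pinning
	// epoch=0 makes the request internally inconsistent. After PR 15356 the
	// node should answer 400 instead of silently computing a result.
	url := "http://localhost:3500/eth/v1/beacon/states/head/committees?epoch=0&slot=33"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // expected: 400
}
```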
## [v6.0.3](https://github.com/prysmaticlabs/prysm/compare/v6.0.2...v6.0.3) - 2025-05-21

This release has important bugfixes for users of the [Beacon API](https://ethereum.github.io/beacon-APIs/). These fixes include:
- Fixed the pending consolidations endpoint to return the correct response.
- Fixed an incorrect field name in the pending partial withdrawals response.
- Fixed attester slashings to return an empty array instead of nil/null.
- Fixed the validator participation and active set changes endpoints to accept a `{state_id}` parameter.

Other improvements include:
- Disabled the deposit log processing routine for Electra and beyond.

Operators are encouraged to update at their own convenience.

### Added

- SSZ static spec tests for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15279)
- Finality and merkle proof spec tests for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15286)
- Sanity and rewards spec tests for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15285)

### Changed

- Added more tracing spans to various helpers related to GetDuties. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15271)
- Disable log processing after deposit requests are activated. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15274)

### Fixed

- Fixed wrong handler for the get pending consolidations endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15290)
- Fixed `/eth/v2/beacon/pool/attester_slashings` to return an empty array instead of nil when there are no slashings. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15291)
- Fix Prysm endpoints `/prysm/v1/validators/{state_id}/participation` and `/prysm/v1/validators/{state_id}/active_set_changes` to properly handle `{state_id}`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15245)
## [v6.0.2](https://github.com/prysmaticlabs/prysm/compare/v6.0.1...v6.0.2) - 2025-05-12

This is a patch release to fix a few important bugs. Most importantly, we have adjusted the index limit for field tries in the beacon state to better support Pectra states. This should alleviate the memory issues clients have been seeing since the Pectra mainnet fork.

### Added

- Enable light client gossip for optimistic and finality updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15220)
- Implement PeerDAS core functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15192)
- Force duties start on received blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15251)
- Added additional tracing spans for the GetDuties routine. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15258)

### Changed

- Use otelgrpc for tracing the gRPC server and client. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15237)
- Upgraded ristretto to v2.2.0, for RISC-V support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15170)
- Update spec to v1.5.0 compliance, which changes the minimal execution requests size. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15256)
- Increase indices limit in field trie rebuilding. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15252)
- Increase Sepolia gas limit to 60M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15253)

### Fixed

- Fixed a wrong field name in the pending partial withdrawals returned in the state JSON representation, as described in https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/beacon-chain.md#pendingpartialwithdrawal. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15254)
- Fixed gocognit on the propose block REST path. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15147)
## [v6.0.1](https://github.com/prysmaticlabs/prysm/compare/v6.0.0...v6.0.1) - 2025-05-02

This release fixes two bugs related to the `payload_attributes` [event stream](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). If you are using or planning to use this endpoint, upgrading to version 6.0.1 is mandatory.
Also, a reminder: like other Beacon API endpoints, a syncing node may return historical data as `finalized` or `head`. Until the node is fully synced to the head of the chain, you should avoid using this data, depending on your application's needs.

We currently recommend against using the `--enable-beacon-rest-api` flag on Mainnet. As you may have noticed, we put a deprecation notice on our gRPC code, in particular on gRPC-related flags, because we want to eventually remove gRPC and make REST HTTP the standard way for the validator client and the beacon node to communicate. That said, the REST option is still unstable and thus marked as experimental in the flag's description. We therefore encourage everyone to keep using gRPC, which is currently the default: testing the REST option on testnets is fine, but using it on Mainnet can lead to missed attestations and even missed blocks.

**Updating to v6.0.0 or later is required for Pectra Mainnet!**

This patch release has a few important fixes from v6.0.0. Updating to this release is encouraged.

### Added

- `UpgradeToFulu` spectests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15190)
- PeerDAS-related KZG wrappers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15186)
- Add light client p2p broadcaster functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15175)
- Added immediate broadcasting of proposer slashings when equivocating blocks are detected during block processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14693)
- Added 2 new errors: `HeadStateErr` and `ErrCouldNotVerifyBlockHeader`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14693)
- Implement pending consolidations Beacon API endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15219)
- PeerDAS: Add needed proto files and corresponding generated code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15187)
- Add light client p2p validator and subscriber functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15214)

### Changed

- Refactored internal function `reValidateSubscriptions` to `pruneSubscriptions` in `beacon-chain/sync/subscriber.go` for improved clarity, addressing a TODO comment. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15160)
- Updated geth to v1.15.9. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15216)
- Removed the slot from `UpdateDuties`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15223)
- Update Hoodi bootnodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15240)

### Fixed

- Avoid a nondeterministic default fork value when generating genesis. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15151)
- `UpgradeToFulu`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15190)
- Fixed an underflow with balances in a leaking edge case with expected withdrawals. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15191)
- Fixes our generated SSZ files to have the correct package name. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15199)
- Fixes our blob sidecar by-root request lists for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15209)
- Ensure that the `payload_attributes` event has a consistent view of the head state by passing the head block in the event and using stategen to retrieve the corresponding state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15213)
- Pass the parent context to update duties when dependent roots change. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15221)
- Process slots across epoch boundaries for the payload attribute event. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15228)
- Extend the payload attribute computation deadline to the beginning of the proposal slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15230)

### Security

- Fix CVE-2025-22869. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15204)
- Fix CVE-2025-22870. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15204)
- Fix CVE-2025-22872. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15204)
- Fix CVE-2025-30204. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15204)
## [v6.0.0](https://github.com/prysmaticlabs/prysm/compare/v5.3.2...v6.0.0) - 2025-04-21

This release introduces Mainnet support for the upcoming Electra + Prague (Pectra) fork, scheduled for mainnet epoch 364032 (May 7, 2025, 10:05:11 UTC). You MUST update the Prysm Beacon Node, the Prysm Validator Client, and your execution layer client to a Pectra-ready release before the fork to stay on the correct chain.
README.md

@@ -4,7 +4,7 @@ Note: The latest and most up-to-date documentation can be found on our [docs portal]

 Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?

-You can explore our [Open Issues](https://github.com/OffchainLabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!
+You can explore our [Open Issues](https://github.com/OffchainLabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/prysm) drop us a line there if you want to get more involved or have any questions on our implementation!

 > [!IMPORTANT]
 > Please, **do not send pull requests for trivial changes**, such as typos, these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
@@ -6,7 +6,7 @@
 [](https://goreportcard.com/report/github.com/OffchainLabs/prysm)
 [](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
 [](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
-[](https://discord.gg/OffchainLabs)
+[](https://discord.gg/prysm)
 [](https://www.gitpoap.io/gh/OffchainLabs/prysm)

 </div>

@@ -25,7 +25,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details

 A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.

-💬 **Need help?** Join our **[Discord Community](https://discord.gg/OffchainLabs)** for support.
+💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.

 ---
WORKSPACE (66 changes)
@@ -1,7 +1,7 @@
 workspace(name = "prysm")

-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

 http_archive(
     name = "rules_pkg",
@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

 rules_pkg_dependencies()

-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
 http_archive(
     name = "toolchains_protoc",
     sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
@@ -255,56 +253,18 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.5.0-beta.5"
+consensus_spec_version = "v1.6.0-alpha.1"

-bls_test_version = "v0.1.1"
+load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

-http_archive(
-    name = "consensus_spec_tests_general",
-    build_file_content = """
-filegroup(
-    name = "test_data",
-    srcs = glob([
-        "**/*.ssz_snappy",
-        "**/*.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    integrity = "sha256-H+Pt4z+HCVDnEBAv814yvsjR7f5l1IpumjFoTj2XnLE=",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
-)
-
-http_archive(
-    name = "consensus_spec_tests_minimal",
-    build_file_content = """
-filegroup(
-    name = "test_data",
-    srcs = glob([
-        "**/*.ssz_snappy",
-        "**/*.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    integrity = "sha256-Dqiwf5BG7yYyURGf+i87AIdArAyztvcgjoi2kSxrGvo=",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
-)
-
-http_archive(
-    name = "consensus_spec_tests_mainnet",
-    build_file_content = """
-filegroup(
-    name = "test_data",
-    srcs = glob([
-        "**/*.ssz_snappy",
-        "**/*.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    integrity = "sha256-xrmsFF243pzXHAjh1EQYKS9gtcwmtHK3wRZDSLlVVRk=",
-    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
-)
+consensus_spec_tests(
+    name = "consensus_spec_tests",
+    flavors = {
+        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
+        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
+        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
+    },
+    version = consensus_spec_version,
+)

 http_archive(
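Net effect of this hunk: the three per-flavor `http_archive` repositories are folded into a single `consensus_spec_tests` macro call loaded from `tools/download_spectests.bzl`, so future spec-test bumps presumably only need to touch `consensus_spec_version` and the three integrity hashes.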
@@ -318,11 +278,13 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-c+gGapqifCvFtmtxfhOwieBDO2Syxp13GECWEpWM/Ho=",
+    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )

+bls_test_version = "v0.1.1"
+
 http_archive(
     name = "bls_spec_tests",
     build_file_content = """
@@ -16,7 +16,6 @@ go_library(
         "//api/server/structs:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
-        "//network/forks:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
@@ -9,7 +9,6 @@
 	"net/url"
 	"path"
 	"regexp"
 	"sort"
 	"strconv"

 	"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@
 	"github.com/OffchainLabs/prysm/v6/api/server/structs"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/network/forks"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
 	return fr.ToConsensus()
 }

-// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
-func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
-	body, err := c.Get(ctx, getForkSchedulePath)
-	if err != nil {
-		return nil, errors.Wrap(err, "error requesting fork schedule")
-	}
-	fsr := &forkScheduleResponse{}
-	err = json.Unmarshal(body, fsr)
-	if err != nil {
-		return nil, err
-	}
-	ofs, err := fsr.OrderedForkSchedule()
-	if err != nil {
-		return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
-	}
-	return ofs, nil
-}
-
 // GetConfigSpec retrieve the current configs of the network used by the beacon node.
 func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
 	body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
 	}
 	return poolResponse, nil
 }
-
-type forkScheduleResponse struct {
-	Data []structs.Fork
-}
-
-func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
-	ofs := make(forks.OrderedSchedule, 0)
-	for _, d := range fsr.Data {
-		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
-		}
-		vSlice, err := hexutil.Decode(d.CurrentVersion)
-		if err != nil {
-			return nil, err
-		}
-		if len(vSlice) != 4 {
-			return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
-		}
-		version := bytesutil.ToBytes4(vSlice)
-		ofs = append(ofs, forks.ForkScheduleEntry{
-			Version: version,
-			Epoch:   primitives.Epoch(epoch),
-		})
-	}
-	sort.Sort(ofs)
-	return ofs, nil
-}
@@ -1,20 +0,0 @@ (api/client/beacon/health/BUILD.bazel, file deleted)
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "health.go",
        "interfaces.go",
        "mock.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
    visibility = ["//visibility:public"],
    deps = ["@org_uber_go_mock//gomock:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["health_test.go"],
    embed = [":go_default_library"],
    deps = ["@org_uber_go_mock//gomock:go_default_library"],
)
@@ -1,58 +0,0 @@ (api/client/beacon/health/health.go, file deleted)
package health

import (
	"context"
	"sync"
)

type NodeHealthTracker struct {
	isHealthy  *bool
	healthChan chan bool
	node       Node
	sync.RWMutex
}

func NewTracker(node Node) Tracker {
	return &NodeHealthTracker{
		node:       node,
		healthChan: make(chan bool, 1),
	}
}

// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
	return n.healthChan
}

func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
	n.RLock()
	defer n.RUnlock()
	if n.isHealthy == nil {
		return false
	}
	return *n.isHealthy
}

func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
	n.Lock()
	defer n.Unlock()

	newStatus := n.node.IsHealthy(ctx)
	if n.isHealthy == nil {
		n.isHealthy = &newStatus
	}

	isStatusChanged := newStatus != *n.isHealthy
	if isStatusChanged {
		// Update the health status
		n.isHealthy = &newStatus
		// Send the new status to the health channel, potentially overwriting the existing value
		select {
		case <-n.healthChan:
			n.healthChan <- newStatus
		default:
			n.healthChan <- newStatus
		}
	}
	return newStatus
}
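The deleted `CheckHealth` above used a coalescing send: a channel with buffer size 1 is drained of any stale, unread value before the new status is written, so a slow consumer only ever observes the latest transition. A minimal standalone sketch of that pattern (illustrative names, not Prysm API):

```go
package main

import "fmt"

// publish coalesces updates into a 1-buffered channel: if a previous value
// is still unread it is dropped, so the consumer always sees the newest one.
func publish(ch chan bool, status bool) {
	select {
	case <-ch: // discard the pending, stale value
	default:
	}
	ch <- status // the buffer now has room, so this send cannot block
}

func main() {
	updates := make(chan bool, 1)
	publish(updates, true)
	publish(updates, false) // overwrites the unread 'true'
	fmt.Println(<-updates)  // prints: false
}
```

Note the original also held the write lock across the drain-and-send, which is what kept two concurrent `CheckHealth` calls from interleaving between the `select` and the send.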
@@ -1,111 +0,0 @@ (api/client/beacon/health/health_test.go, file deleted)
package health

import (
	"context"
	"sync"
	"testing"

	"go.uber.org/mock/gomock"
)

func TestNodeHealth_IsHealthy(t *testing.T) {
	tests := []struct {
		name      string
		isHealthy bool
		want      bool
	}{
		{"initially healthy", true, true},
		{"initially unhealthy", false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := &NodeHealthTracker{
				isHealthy:  &tt.isHealthy,
				healthChan: make(chan bool, 1),
			}
			if got := n.IsHealthy(context.Background()); got != tt.want {
				t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
	tests := []struct {
		name       string
		initial    bool // Initial health status
		newStatus  bool // Status to update to
		shouldSend bool // Should a message be sent through the channel
	}{
		{"healthy to unhealthy", true, false, true},
		{"unhealthy to healthy", false, true, true},
		{"remain healthy", true, true, false},
		{"remain unhealthy", false, false, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			client := NewMockHealthClient(ctrl)
			client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
			n := &NodeHealthTracker{
				isHealthy:  &tt.initial,
				node:       client,
				healthChan: make(chan bool, 1),
			}

			s := n.CheckHealth(context.Background())
			// Check if health status was updated
			if s != tt.newStatus {
				t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
			}

			select {
			case status := <-n.HealthUpdates():
				if !tt.shouldSend {
					t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
				} else if status != tt.newStatus {
					t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
				}
			default:
				if tt.shouldSend {
					t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
				}
			}
		})
	}
}

func TestNodeHealth_Concurrency(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := NewMockHealthClient(ctrl)
	n := NewTracker(client)
	var wg sync.WaitGroup

	// Number of goroutines to spawn for both reading and writing
	numGoroutines := 6

	wg.Add(numGoroutines * 2) // for readers and writers

	// Concurrently update health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
			n.CheckHealth(context.Background())
			client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
			n.CheckHealth(context.Background())
		}()
	}

	// Concurrently read health status
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			_ = n.IsHealthy(context.Background()) // Just read the value
		}()
	}

	wg.Wait() // Wait for all goroutines to finish
}
@@ -1,13 +0,0 @@ (api/client/beacon/health/interfaces.go, file deleted)
package health

import "context"

type Tracker interface {
	HealthUpdates() <-chan bool
	CheckHealth(ctx context.Context) bool
	Node
}

type Node interface {
	IsHealthy(ctx context.Context) bool
}
@@ -1,58 +0,0 @@ (api/client/beacon/health/mock.go, file deleted)
package health

import (
	"context"
	"reflect"
	"sync"

	"go.uber.org/mock/gomock"
)

var (
	_ = Node(&MockHealthClient{})
)

// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
	ctrl     *gomock.Controller
	recorder *MockHealthClientMockRecorder
	sync.Mutex
}

// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
	mock *MockHealthClient
}

// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
	m.Lock()
	defer m.Unlock()
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsHealthy", arg0)
	ret0, ok := ret[0].(bool)
	if !ok {
		return false
	}
	return ret0
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
	return m.recorder
}

// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
	mr.mock.Lock()
	defer mr.mock.Unlock()
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}

// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
	mock := &MockHealthClient{ctrl: ctrl}
	mock.recorder = &MockHealthClientMockRecorder{mock}
	return mock
}
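Worth noting about the deleted mock: unlike typical generated gomock code, `MockHealthClient` embeds a `sync.Mutex` and locks around both `ctrl.Call` and the recorder, which is what allowed `TestNodeHealth_Concurrency` to register expectations and invoke `IsHealthy` from multiple goroutines without racing inside the controller.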
@@ -13,7 +13,6 @@ go_library(
     deps = [
         "//api:go_default_library",
         "//api/client:go_default_library",
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
@@ -28,7 +27,6 @@ go_library(
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
@@ -72,7 +72,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 		log.WithFields(log.Fields{
 			"bodyBase64": "(nil value)",
 			"url":        r.URL.String(),
-		}).Info("builder http request")
+		}).Info("Builder http request")
 		return nil
 	}
 	t := io.TeeReader(r.Body, b)
@@ -89,7 +89,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	log.WithFields(log.Fields{
 		"bodyBase64": string(body),
 		"url":        r.URL.String(),
-	}).Info("builder http request")
+	}).Info("Builder http request")

 	return nil
 }
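These capitalization-only log message changes line up with the `logcapitalization` analyzer newly registered in the nogo configuration near the top of this diff.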
@@ -101,7 +101,7 @@ type BuilderClient interface {
 	NodeURL() string
 	GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
 	RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
-	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
+	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
 	Status(ctx context.Context) error
 }
@@ -241,7 +241,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
 		return nil, errors.Wrap(err, "error getting header from builder server")
 	}

-	bid, err := c.parseHeaderResponse(data, header)
+	bid, err := c.parseHeaderResponse(data, header, slot)
 	if err != nil {
 		return nil, errors.Wrapf(
 			err,
@@ -254,7 +254,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
 	return bid, nil
 }

-func (c *Client) parseHeaderResponse(data []byte, header http.Header) (SignedBid, error) {
+func (c *Client) parseHeaderResponse(data []byte, header http.Header, slot primitives.Slot) (SignedBid, error) {
 	var versionHeader string
 	if c.sszEnabled || header.Get(api.VersionHeader) != "" {
 		versionHeader = header.Get(api.VersionHeader)
@@ -276,7 +276,7 @@ func (c *Client) parseHeaderResponse(data []byte, header http.Header) (SignedBid
 	}

 	if ver >= version.Electra {
-		return c.parseHeaderElectra(data)
+		return c.parseHeaderElectra(data, slot)
 	}
 	if ver >= version.Deneb {
 		return c.parseHeaderDeneb(data)
@@ -291,7 +291,7 @@ func (c *Client) parseHeaderResponse(data []byte, header http.Header) (SignedBid
 	return nil, fmt.Errorf("unsupported header version %s", versionHeader)
 }

-func (c *Client) parseHeaderElectra(data []byte) (SignedBid, error) {
+func (c *Client) parseHeaderElectra(data []byte, slot primitives.Slot) (SignedBid, error) {
 	if c.sszEnabled {
 		sb := &ethpb.SignedBuilderBidElectra{}
 		if err := sb.UnmarshalSSZ(data); err != nil {
@@ -303,7 +303,7 @@ func (c *Client) parseHeaderElectra(data []byte) (SignedBid, error) {
 		if err := json.Unmarshal(data, hr); err != nil {
 			return nil, errors.Wrap(err, "could not unmarshal ExecHeaderResponseElectra JSON")
 		}
-		p, err := hr.ToProto()
+		p, err := hr.ToProto(slot)
 		if err != nil {
 			return nil, errors.Wrap(err, "could not convert ExecHeaderResponseElectra to proto")
 		}
@@ -446,6 +446,9 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
 var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")

 func getVersionsBlockToPayload(blockVersion int) (int, error) {
+	if blockVersion >= version.Fulu {
+		return version.Fulu, nil
+	}
 	if blockVersion >= version.Deneb {
 		return version.Deneb, nil
 	}
@@ -460,7 +463,7 @@ func getVersionsBlockToPayload(blockVersion int) (int, error) {

 // SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
 // The response is the full execution payload used to create the blinded block.
-func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	body, postOpts, err := c.buildBlindedBlockRequest(sb)
 	if err != nil {
 		return nil, nil, err
@@ -558,7 +561,7 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
 func (c *Client) parseBlindedBlockResponse(
 	respBytes []byte,
 	forkVersion int,
-) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	if c.sszEnabled {
 		return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
 	}
@@ -568,8 +571,18 @@ func (c *Client) parseBlindedBlockResponse(
 func (c *Client) parseBlindedBlockResponseSSZ(
 	respBytes []byte,
 	forkVersion int,
-) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
-	if forkVersion >= version.Deneb {
+) (interfaces.ExecutionData, v1.BlobsBundler, error) {
+	if forkVersion >= version.Fulu {
+		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundleV2{}
+		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
+			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundleV2 SSZ")
+		}
+		ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
+		}
+		return ed, payloadAndBlobs.BlobsBundle, nil
+	} else if forkVersion >= version.Deneb {
 		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
 		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
 			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
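The recurring signature change in these hunks swaps the concrete `*v1.BlobsBundle` return for a `v1.BlobsBundler` value, so the Fulu branch can hand back the V2 bundle type through the same method. The real interface lives in `proto/engine/v1` and is not shown in this diff; a hypothetical sketch of its likely shape, based on the protobuf getters both bundle types would share:

```go
// Hypothetical shape only - the actual definition is in proto/engine/v1.
// Both BlobsBundle (Deneb) and BlobsBundleV2 (Fulu) expose these getters,
// so either can satisfy the interface returned by SubmitBlindedBlock.
type BlobsBundler interface {
	GetKzgCommitments() [][]byte
	GetProofs() [][]byte
	GetBlobs() [][]byte
}
```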
@@ -2,7 +2,6 @@ package builder

 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -31,7 +30,7 @@ func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
 }

 func TestClient_Status(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	statusPath := "/eth/v1/builder/status"
 	hc := &http.Client{
 		Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
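These test hunks replace `context.Background()` with `t.Context()`, added in Go 1.24: it returns a per-test context that the framework cancels as the test finishes, so contexts no longer outlive their test. A minimal sketch:

```go
package example

import "testing"

func TestWithContext(t *testing.T) {
	ctx := t.Context() // canceled automatically when the test completes
	_ = ctx            // hand this to code under test instead of context.Background()
}
```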
@@ -84,7 +83,7 @@
 }

 func TestClient_RegisterValidator(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
 	expectedPath := "/eth/v1/builder/validators"
 	t.Run("JSON success", func(t *testing.T) {
@@ -168,7 +167,7 @@
 }

 func TestClient_GetHeader(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
 	var slot primitives.Slot = 23
 	parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
@@ -532,7 +531,7 @@
 			require.Equal(t, expectedPath, r.URL.Path)
 			epr := &ExecHeaderResponseElectra{}
 			require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
-			pro, err := epr.ToProto()
+			pro, err := epr.ToProto(100)
 			require.NoError(t, err)
 			ssz, err := pro.MarshalSSZ()
 			require.NoError(t, err)
@@ -601,7 +600,7 @@
 }

 func TestSubmitBlindedBlock(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()

 	t.Run("bellatrix", func(t *testing.T) {
 		hc := &http.Client{
@@ -640,9 +639,9 @@
 			require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
 			epr := &ExecutionPayloadResponse{}
 			require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
-			ep := &ExecutionPayload{}
+			ep := &structs.ExecutionPayload{}
 			require.NoError(t, json.Unmarshal(epr.Data, ep))
-			pro, err := ep.ToProto()
+			pro, err := ep.ToConsensus()
 			require.NoError(t, err)
 			ssz, err := pro.MarshalSSZ()
 			require.NoError(t, err)
@@ -710,9 +709,9 @@
 			require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
 			epr := &ExecutionPayloadResponse{}
 			require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
-			ep := &ExecutionPayloadCapella{}
+			ep := &structs.ExecutionPayloadCapella{}
 			require.NoError(t, json.Unmarshal(epr.Data, ep))
-			pro, err := ep.ToProto()
+			pro, err := ep.ToConsensus()
 			require.NoError(t, err)
 			ssz, err := pro.MarshalSSZ()
 			require.NoError(t, err)
@@ -1559,7 +1558,7 @@ func TestRequestLogger(t *testing.T) {
 	c, err := NewClient("localhost:3500", wo)
 	require.NoError(t, err)

-	ctx := context.Background()
+	ctx := t.Context()
 	hc := &http.Client{
 		Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
 			require.Equal(t, getStatus, r.URL.Path)
@@ -41,7 +41,7 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
 }

 // SubmitBlindedBlock --
-func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	return nil, nil, nil
 }
[File diff suppressed because it is too large]
@@ -328,72 +328,72 @@ func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.ParentHash),
+			actual:   hr.Data.Message.Header.ParentHash,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
 		},
 		{
 			expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
-			actual:   hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
+			actual:   hr.Data.Message.Header.FeeRecipient,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.StateRoot),
+			actual:   hr.Data.Message.Header.StateRoot,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
+			actual:   hr.Data.Message.Header.ReceiptsRoot,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
 		},
 		{
 			expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
-			actual:   hexutil.Encode(hr.Data.Message.Header.LogsBloom),
+			actual:   hr.Data.Message.Header.LogsBloom,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.PrevRandao),
+			actual:   hr.Data.Message.Header.PrevRandao,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
 		},
 		{
 			expected: "1",
-			actual:   fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
+			actual:   hr.Data.Message.Header.BlockNumber,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
 		},
 		{
 			expected: "1",
-			actual:   fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
+			actual:   hr.Data.Message.Header.GasLimit,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
 		},
 		{
 			expected: "1",
-			actual:   fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
+			actual:   hr.Data.Message.Header.GasUsed,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
 		},
 		{
 			expected: "1",
-			actual:   fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
+			actual:   hr.Data.Message.Header.Timestamp,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.ExtraData),
+			actual:   hr.Data.Message.Header.ExtraData,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
 		},
 		{
 			expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
-			actual:   fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
+			actual:   hr.Data.Message.Header.BaseFeePerGas,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.BlockHash),
+			actual:   hr.Data.Message.Header.BlockHash,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
 		},
 		{
 			expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
-			actual:   hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
+			actual:   hr.Data.Message.Header.TransactionsRoot,
 			name:     "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
 		},
 	}
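In the two table rewrites here, `actual` switches from hex-encoding a decoded byte slice to the raw struct field, which implies the response structs now keep these header fields as JSON strings; the `expected` literals are unchanged, so the assertions compare string to string.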
@@ -427,77 +427,77 @@ func TestExecutionHeaderResponseCapellaUnmarshal(t *testing.T) {
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ParentHash),
|
||||
actual: hr.Data.Message.Header.ParentHash,
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
|
||||
actual: hr.Data.Message.Header.FeeRecipient,
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.StateRoot),
|
||||
actual: hr.Data.Message.Header.StateRoot,
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
|
||||
actual: hr.Data.Message.Header.ReceiptsRoot,
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- actual: hexutil.Encode(hr.Data.Message.Header.LogsBloom),
+ actual: hr.Data.Message.Header.LogsBloom,
name: "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(hr.Data.Message.Header.PrevRandao),
+ actual: hr.Data.Message.Header.PrevRandao,
name: "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
+ actual: hr.Data.Message.Header.BlockNumber,
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
+ actual: hr.Data.Message.Header.GasLimit,
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
+ actual: hr.Data.Message.Header.GasUsed,
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
+ actual: hr.Data.Message.Header.Timestamp,
name: "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(hr.Data.Message.Header.ExtraData),
+ actual: hr.Data.Message.Header.ExtraData,
name: "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
- actual: fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
+ actual: hr.Data.Message.Header.BaseFeePerGas,
name: "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(hr.Data.Message.Header.BlockHash),
+ actual: hr.Data.Message.Header.BlockHash,
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
+ actual: hr.Data.Message.Header.TransactionsRoot,
name: "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(hr.Data.Message.Header.WithdrawalsRoot),
+ actual: hr.Data.Message.Header.WithdrawalsRoot,
name: "ExecHeaderResponse.ExecutionPayloadHeader.WithdrawalsRoot",
},
}
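The change running through all of these unmarshal tables is the same: the builder API structs now keep payload-header fields in their JSON wire form (0x-prefixed hex strings and quoted decimals) instead of decoded []byte/uint64 values, so the expected/actual comparison no longer needs hexutil.Encode or fmt.Sprintf. A minimal, self-contained sketch of the new shape (headerJSON and its tags are illustrative, not the exact Prysm definitions):

package main

import (
	"encoding/json"
	"fmt"
)

// Sketch: fields stay as wire-format strings, so tests can compare
// them against fixture literals directly.
type headerJSON struct {
	ParentHash  string `json:"parent_hash"`
	BlockNumber string `json:"block_number"` // quoted decimal, per the beacon API convention
}

func main() {
	raw := `{"parent_hash":"0x01","block_number":"1"}`
	var h headerJSON
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Println(h.ParentHash == "0x01", h.BlockNumber == "1") // true true
}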
@@ -867,88 +867,6 @@ var testExampleExecutionPayloadDenebDifferentProofCount = fmt.Sprintf(`{
}
}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))

- func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
- epr := &ExecPayloadResponse{}
- require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
- cases := []struct {
- expected string
- actual string
- name string
- }{
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ParentHash),
- name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
- },
- {
- expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
- actual: hexutil.Encode(epr.Data.FeeRecipient),
- name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
- },
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.StateRoot),
- name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
- },
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ReceiptsRoot),
- name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
- },
- {
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- actual: hexutil.Encode(epr.Data.LogsBloom),
- name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
- },
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.PrevRandao),
- name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
- },
- {
- expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.BlockNumber),
- name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
- },
- {
- expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.GasLimit),
- name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
- },
- {
- expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.GasUsed),
- name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
- },
- {
- expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.Timestamp),
- name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
- },
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExtraData),
- name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
- },
- {
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
- actual: fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
- name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
- },
- {
- expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.BlockHash),
- name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
- },
- }
- for _, c := range cases {
- require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
- }
- require.Equal(t, 1, len(epr.Data.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
- require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
- }

func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
epr := &ExecPayloadResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
@@ -959,67 +877,67 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ParentHash),
+ actual: epr.Data.ParentHash,
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
- actual: hexutil.Encode(epr.Data.FeeRecipient),
+ actual: epr.Data.FeeRecipient,
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.StateRoot),
+ actual: epr.Data.StateRoot,
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ReceiptsRoot),
+ actual: epr.Data.ReceiptsRoot,
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- actual: hexutil.Encode(epr.Data.LogsBloom),
+ actual: epr.Data.LogsBloom,
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.PrevRandao),
+ actual: epr.Data.PrevRandao,
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.BlockNumber),
+ actual: epr.Data.BlockNumber,
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.GasLimit),
+ actual: epr.Data.GasLimit,
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.GasUsed),
+ actual: epr.Data.GasUsed,
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.Timestamp),
+ actual: epr.Data.Timestamp,
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExtraData),
+ actual: epr.Data.ExtraData,
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
- actual: fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
+ actual: epr.Data.BaseFeePerGas,
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.BlockHash),
+ actual: epr.Data.BlockHash,
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
}
@@ -1028,14 +946,14 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
}
require.Equal(t, 1, len(epr.Data.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
- require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
+ require.Equal(t, txHash, epr.Data.Transactions[0])

require.Equal(t, 1, len(epr.Data.Withdrawals))
w := epr.Data.Withdrawals[0]
- assert.Equal(t, uint64(1), w.Index.Uint64())
- assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
- assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
- assert.Equal(t, uint64(1), w.Amount.Uint64())
+ assert.Equal(t, "1", w.WithdrawalIndex)
+ assert.Equal(t, "1", w.ValidatorIndex)
+ assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.ExecutionAddress)
+ assert.Equal(t, "1", w.Amount)
}

func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
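The withdrawal assertions change shape for the same reason: the old struct exposed typed accessors (w.Index.Uint64(), w.Address.String()), while the new one is all strings. A hedged sketch of what the updated assertions imply about the struct (field names and tags are inferred for illustration, not copied from Prysm):

package example

// withdrawalJSON approximates the string-typed shape the new assertions
// exercise; Gwei amounts and indices travel as quoted decimal strings.
type withdrawalJSON struct {
	WithdrawalIndex  string `json:"index"`
	ValidatorIndex   string `json:"validator_index"`
	ExecutionAddress string `json:"address"`
	Amount           string `json:"amount"`
}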
@@ -1048,77 +966,77 @@ func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.ParentHash),
+ actual: epr.Data.ExecutionPayload.ParentHash,
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.FeeRecipient),
+ actual: epr.Data.ExecutionPayload.FeeRecipient,
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.StateRoot),
+ actual: epr.Data.ExecutionPayload.StateRoot,
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.ReceiptsRoot),
+ actual: epr.Data.ExecutionPayload.ReceiptsRoot,
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.LogsBloom),
+ actual: epr.Data.ExecutionPayload.LogsBloom,
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.PrevRandao),
+ actual: epr.Data.ExecutionPayload.PrevRandao,
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlockNumber),
+ actual: epr.Data.ExecutionPayload.BlockNumber,
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasLimit),
+ actual: epr.Data.ExecutionPayload.GasLimit,
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasUsed),
+ actual: epr.Data.ExecutionPayload.GasUsed,
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.Timestamp),
+ actual: epr.Data.ExecutionPayload.Timestamp,
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.ExtraData),
+ actual: epr.Data.ExecutionPayload.ExtraData,
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BaseFeePerGas),
+ actual: epr.Data.ExecutionPayload.BaseFeePerGas,
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
- actual: hexutil.Encode(epr.Data.ExecutionPayload.BlockHash),
+ actual: epr.Data.ExecutionPayload.BlockHash,
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
{
expected: "2",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlobGasUsed),
+ actual: epr.Data.ExecutionPayload.BlobGasUsed,
name: "ExecPayloadResponse.ExecutionPayload.BlobGasUsed",
},
{
expected: "3",
- actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessBlobGas),
+ actual: epr.Data.ExecutionPayload.ExcessBlobGas,
name: "ExecPayloadResponse.ExecutionPayload.ExcessBlobGas",
},
}
@@ -1127,64 +1045,16 @@ func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
}
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
- require.Equal(t, txHash, hexutil.Encode(epr.Data.ExecutionPayload.Transactions[0]))
+ require.Equal(t, txHash, epr.Data.ExecutionPayload.Transactions[0])

require.Equal(t, 1, len(epr.Data.ExecutionPayload.Withdrawals))
w := epr.Data.ExecutionPayload.Withdrawals[0]
- assert.Equal(t, uint64(1), w.Index.Uint64())
- assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
- assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
- assert.Equal(t, uint64(1), w.Amount.Uint64())
- assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.BlobGasUsed))
- assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessBlobGas))
- }
-
- func TestExecutionPayloadResponseToProto(t *testing.T) {
- hr := &ExecPayloadResponse{}
- require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
- p, err := hr.ToProto()
- require.NoError(t, err)
-
- parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
- feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
- require.NoError(t, err)
- stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
- receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
- require.NoError(t, err)
- prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
- extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
- blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
- require.NoError(t, err)
-
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
- require.NoError(t, err)
- txList := [][]byte{tx}
-
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
- require.NoError(t, err)
- expected := &v1.ExecutionPayload{
- ParentHash: parentHash,
- FeeRecipient: feeRecipient,
- StateRoot: stateRoot,
- ReceiptsRoot: receiptsRoot,
- LogsBloom: logsBloom,
- PrevRandao: prevRandao,
- BlockNumber: 1,
- GasLimit: 1,
- GasUsed: 1,
- Timestamp: 1,
- ExtraData: extraData,
- BaseFeePerGas: bfpg.SSZBytes(),
- BlockHash: blockHash,
- Transactions: txList,
- }
- require.DeepEqual(t, expected, p)
+ assert.Equal(t, "1", w.WithdrawalIndex)
+ assert.Equal(t, "1", w.ValidatorIndex)
+ assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.ExecutionAddress)
+ assert.Equal(t, "1", w.Amount)
+ assert.Equal(t, "2", epr.Data.ExecutionPayload.BlobGasUsed)
+ assert.Equal(t, "3", epr.Data.ExecutionPayload.ExcessBlobGas)
}

func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
@@ -1352,16 +1222,6 @@ func pbEth1Data() *eth.Eth1Data {
}
}

- func TestEth1DataMarshal(t *testing.T) {
- ed := &Eth1Data{
- Eth1Data: pbEth1Data(),
- }
- b, err := json.Marshal(ed)
- require.NoError(t, err)
expected := `{"deposit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","deposit_count":"23","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`
- require.Equal(t, expected, string(b))
- }

func pbSyncAggregate() *eth.SyncAggregate {
return &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
@@ -1369,14 +1229,6 @@ func pbSyncAggregate() *eth.SyncAggregate {
}
}

- func TestSyncAggregate_MarshalJSON(t *testing.T) {
- sa := &SyncAggregate{pbSyncAggregate()}
- b, err := json.Marshal(sa)
- require.NoError(t, err)
expected := `{"sync_committee_bits":"0x01","sync_committee_signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`
- require.Equal(t, expected, string(b))
- }

func pbDeposit(t *testing.T) *eth.Deposit {
return &eth.Deposit{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
@@ -1389,16 +1241,6 @@ func pbDeposit(t *testing.T) *eth.Deposit {
}
}

- func TestDeposit_MarshalJSON(t *testing.T) {
- d := &Deposit{
- Deposit: pbDeposit(t),
- }
- b, err := json.Marshal(d)
- require.NoError(t, err)
expected := `{"proof":["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"],"data":{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","withdrawal_credentials":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","amount":"1","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
- require.Equal(t, expected, string(b))
- }

func pbSignedVoluntaryExit(t *testing.T) *eth.SignedVoluntaryExit {
return &eth.SignedVoluntaryExit{
Exit: &eth.VoluntaryExit{
@@ -1409,16 +1251,6 @@ func pbSignedVoluntaryExit(t *testing.T) *eth.SignedVoluntaryExit {
}
}

- func TestVoluntaryExit(t *testing.T) {
- ve := &SignedVoluntaryExit{
- SignedVoluntaryExit: pbSignedVoluntaryExit(t),
- }
- b, err := json.Marshal(ve)
- require.NoError(t, err)
expected := `{"message":{"epoch":"1","validator_index":"1"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
- require.Equal(t, expected, string(b))
- }

func pbAttestation(t *testing.T) *eth.Attestation {
return &eth.Attestation{
AggregationBits: bitfield.Bitlist{0x01},
@@ -1439,16 +1271,6 @@ func pbAttestation(t *testing.T) *eth.Attestation {
}
}

- func TestAttestationMarshal(t *testing.T) {
- a := &Attestation{
- Attestation: pbAttestation(t),
- }
- b, err := json.Marshal(a)
- require.NoError(t, err)
expected := `{"aggregation_bits":"0x01","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
- require.Equal(t, expected, string(b))
- }

func pbAttesterSlashing(t *testing.T) *eth.AttesterSlashing {
return &eth.AttesterSlashing{
Attestation_1: &eth.IndexedAttestation{
@@ -1489,9 +1311,7 @@ func pbAttesterSlashing(t *testing.T) *eth.AttesterSlashing {
}

func TestAttesterSlashing_MarshalJSON(t *testing.T) {
- as := &AttesterSlashing{
- AttesterSlashing: pbAttesterSlashing(t),
- }
+ as := structs.AttesterSlashingFromConsensus(pbAttesterSlashing(t))
b, err := json.Marshal(as)
require.NoError(t, err)
expected := `{"attestation_1":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"attestation_2":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
@@ -1599,9 +1419,8 @@ func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb
}

func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
- h := &ExecutionPayloadHeader{
- ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
- }
+ h, err := structs.ExecutionPayloadHeaderFromConsensus(pbExecutionPayloadHeader(t))
+ require.NoError(t, err)
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
@@ -1609,9 +1428,9 @@ func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
}

func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
- h := &ExecutionPayloadHeaderCapella{
- ExecutionPayloadHeaderCapella: pbExecutionPayloadHeaderCapella(t),
- }
+ h, err := structs.ExecutionPayloadHeaderCapellaFromConsensus(pbExecutionPayloadHeaderCapella(t))
+ require.NoError(t, err)
+ require.NoError(t, err)
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
@@ -1619,9 +1438,8 @@ func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
}

func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
- h := &ExecutionPayloadHeaderDeneb{
- ExecutionPayloadHeaderDeneb: pbExecutionPayloadHeaderDeneb(t),
- }
+ h, err := structs.ExecutionPayloadHeaderDenebFromConsensus(pbExecutionPayloadHeaderDeneb(t))
+ require.NoError(t, err)
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","blob_gas_used":"1","excess_blob_gas":"2"}`
@@ -1850,12 +1668,13 @@ func TestRoundTripUint256(t *testing.T) {
func TestRoundTripProtoUint256(t *testing.T) {
h := pbExecutionPayloadHeader(t)
h.BaseFeePerGas = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
- hm := &ExecutionPayloadHeader{ExecutionPayloadHeader: h}
+ hm, err := structs.ExecutionPayloadHeaderFromConsensus(h)
+ require.NoError(t, err)
m, err := json.Marshal(hm)
require.NoError(t, err)
- hu := &ExecutionPayloadHeader{}
+ hu := &structs.ExecutionPayloadHeader{}
require.NoError(t, json.Unmarshal(m, hu))
- hp, err := hu.ToProto()
+ hp, err := hu.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, h.BaseFeePerGas, hp.BaseFeePerGas)
}
@@ -1863,7 +1682,7 @@ func TestRoundTripProtoUint256(t *testing.T) {
func TestExecutionPayloadHeaderRoundtrip(t *testing.T) {
expected, err := os.ReadFile("testdata/execution-payload.json")
require.NoError(t, err)
- hu := &ExecutionPayloadHeader{}
+ hu := &structs.ExecutionPayloadHeader{}
require.NoError(t, json.Unmarshal(expected, hu))
m, err := json.Marshal(hu)
require.NoError(t, err)
@@ -1873,7 +1692,7 @@ func TestExecutionPayloadHeaderRoundtrip(t *testing.T) {
func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
expected, err := os.ReadFile("testdata/execution-payload-capella.json")
require.NoError(t, err)
- hu := &ExecutionPayloadHeaderCapella{}
+ hu := &structs.ExecutionPayloadHeaderCapella{}
require.NoError(t, json.Unmarshal(expected, hu))
m, err := json.Marshal(hu)
require.NoError(t, err)
@@ -1994,11 +1813,9 @@ func TestEmptyResponseBody(t *testing.T) {
epr := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal(encoded, epr))
pp, err := epr.ParsePayload()
require.NoError(t, err)
pb, err := pp.PayloadProto()
if err == nil {
require.NoError(t, err)
require.Equal(t, false, pb == nil)
require.Equal(t, false, pp == nil)
} else {
require.ErrorIs(t, err, consensusblocks.ErrNilObject)
}
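The round-trip tests above all follow one scheme: build the API struct from a consensus object, marshal it to JSON, unmarshal into a fresh struct, convert back with ToConsensus, and compare the field most easily corrupted (the uint256 base fee). A minimal generic sketch of the same scheme (the header type here is a placeholder, not the Prysm one):

package main

import (
	"encoding/json"
	"fmt"
)

type header struct {
	BaseFeePerGas string `json:"base_fee_per_gas"`
}

func main() {
	orig := header{BaseFeePerGas: "1000000000"}
	m, err := json.Marshal(orig)
	if err != nil {
		panic(err)
	}
	var round header
	if err := json.Unmarshal(m, &round); err != nil {
		panic(err)
	}
	// A lossless round trip is exactly what TestRoundTripProtoUint256
	// asserts for the SSZ-bytes representation.
	fmt.Println(round == orig) // true
}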
@@ -14,22 +14,10 @@ import (
)

const (
- EventHead = "head"
- EventBlock = "block"
- EventAttestation = "attestation"
- EventVoluntaryExit = "voluntary_exit"
- EventBlsToExecutionChange = "bls_to_execution_change"
- EventProposerSlashing = "proposer_slashing"
- EventAttesterSlashing = "attester_slashing"
- EventFinalizedCheckpoint = "finalized_checkpoint"
- EventChainReorg = "chain_reorg"
- EventContributionAndProof = "contribution_and_proof"
- EventLightClientFinalityUpdate = "light_client_finality_update"
- EventLightClientOptimisticUpdate = "light_client_optimistic_update"
- EventPayloadAttributes = "payload_attributes"
- EventBlobSidecar = "blob_sidecar"
- EventError = "error"
- EventConnectionError = "connection_error"
+ EventHead = "head"
+
+ EventError = "error"
+ EventConnectionError = "connection_error"
)

var (
@@ -1,7 +1,6 @@
package event

import (
- "context"
"fmt"
"net/http"
"net/http/httptest"
@@ -30,7 +29,7 @@ func TestNewEventStream(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- _, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
+ _, err := NewEventStream(t.Context(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -56,7 +55,7 @@ func TestEventStream(t *testing.T) {

topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
- stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
+ stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)

@@ -83,8 +82,7 @@ func TestEventStream(t *testing.T) {
func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()

// use valid url that will result in failed request with nil body
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
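The recurring context.Background() to t.Context() swap across these test files relies on the testing-package context added in Go 1.24: it is canceled automatically just before the test's cleanup functions run, which removes the manual WithCancel/defer cancel boilerplate seen in TestEventStreamRequestError. A minimal before/after sketch:

package example

import (
	"context"
	"testing"
)

func TestWithManualContext(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // easy to forget; leaks the context's resources if omitted
	_ = ctx
}

func TestWithTestContext(t *testing.T) {
	ctx := t.Context() // canceled for you before cleanup runs (Go 1.24+)
	_ = ctx
}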
@@ -1,7 +1,6 @@
package grpc

import (
- "context"
"testing"

"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -16,7 +15,7 @@ type customErrorData struct {

func TestAppendHeaders(t *testing.T) {
t.Run("one_header", func(t *testing.T) {
- ctx := AppendHeaders(context.Background(), []string{"first=value1"})
+ ctx := AppendHeaders(t.Context(), []string{"first=value1"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -24,7 +23,7 @@ func TestAppendHeaders(t *testing.T) {
})

t.Run("multiple_headers", func(t *testing.T) {
- ctx := AppendHeaders(context.Background(), []string{"first=value1", "second=value2"})
+ ctx := AppendHeaders(t.Context(), []string{"first=value1", "second=value2"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values")
@@ -33,7 +32,7 @@ func TestAppendHeaders(t *testing.T) {
})

t.Run("one_empty_header", func(t *testing.T) {
- ctx := AppendHeaders(context.Background(), []string{"first=value1", ""})
+ ctx := AppendHeaders(t.Context(), []string{"first=value1", ""})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -42,7 +41,7 @@ func TestAppendHeaders(t *testing.T) {

t.Run("incorrect_header", func(t *testing.T) {
logHook := logTest.NewGlobal()
- ctx := AppendHeaders(context.Background(), []string{"first=value1", "second"})
+ ctx := AppendHeaders(t.Context(), []string{"first=value1", "second"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -51,7 +50,7 @@ func TestAppendHeaders(t *testing.T) {
})

t.Run("header_value_with_equal_sign", func(t *testing.T) {
- ctx := AppendHeaders(context.Background(), []string{"first=value=1"})
+ ctx := AppendHeaders(t.Context(), []string{"first=value=1"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -1,7 +1,6 @@
package httprest

import (
- "context"
"flag"
"fmt"
"net"
@@ -34,7 +33,7 @@ func TestServer_StartStop(t *testing.T) {
WithRouter(handler),
}

- g, err := New(context.Background(), opts...)
+ g, err := New(t.Context(), opts...)
require.NoError(t, err)

g.Start()
@@ -62,7 +61,7 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
WithRouter(handler),
}

- g, err := New(context.Background(), opts...)
+ g, err := New(t.Context(), opts...)
require.NoError(t, err)

writer := httptest.NewRecorder()

@@ -31,6 +31,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
@@ -44,6 +45,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -57,6 +59,7 @@ go_test(
deps = [
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -7,12 +7,15 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)

// ----------------------------------------------------------------------------
@@ -132,6 +135,13 @@ func (e *ExecutionPayload) ToConsensus() (*enginev1.ExecutionPayload, error) {
}, nil
}

+ func (r *ExecutionPayload) PayloadProto() (proto.Message, error) {
+ if r == nil {
+ return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
+ }
+ return r.ToConsensus()
+ }

func ExecutionPayloadHeaderFromConsensus(payload *enginev1.ExecutionPayloadHeader) (*ExecutionPayloadHeader, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
@@ -383,6 +393,13 @@ func (e *ExecutionPayloadCapella) ToConsensus() (*enginev1.ExecutionPayloadCapel
}, nil
}

+ func (p *ExecutionPayloadCapella) PayloadProto() (proto.Message, error) {
+ if p == nil {
+ return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil capella execution payload")
+ }
+ return p.ToConsensus()
+ }

func ExecutionPayloadHeaderCapellaFromConsensus(payload *enginev1.ExecutionPayloadHeaderCapella) (*ExecutionPayloadHeaderCapella, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
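The new PayloadProto methods are written to be safe on a nil receiver, which is what an absent payload unmarshals to; in Go, calling a method on a nil pointer is legal as long as the method checks before dereferencing. A reduced sketch of the pattern (a string return stands in for proto.Message, and the sentinel is a stand-in for consensusblocks.ErrNilObject):

package main

import (
	"errors"
	"fmt"
)

var errNilObject = errors.New("nil object") // stand-in sentinel

type execPayload struct{ blockHash string }

// Safe to call on a nil receiver: the guard runs before any field access,
// and wrapping keeps errNilObject reachable for errors.Is / require.ErrorIs.
func (p *execPayload) PayloadProto() (string, error) {
	if p == nil {
		return "", fmt.Errorf("nil execution payload: %w", errNilObject)
	}
	return p.blockHash, nil
}

func main() {
	var p *execPayload
	_, err := p.PayloadProto()
	fmt.Println(errors.Is(err, errNilObject)) // true
}

This is the same property TestEmptyResponseBody leans on when it accepts either a decoded payload or an ErrNilObject-wrapping error.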
@@ -26,10 +26,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -77,7 +74,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
}

return &BeaconState{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -116,10 +113,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -183,7 +177,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
}

return &BeaconStateAltair{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -225,10 +219,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -304,7 +295,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
}

return &BeaconStateBellatrix{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -347,10 +338,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -442,7 +430,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
}

return &BeaconStateCapella{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -488,10 +476,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -583,7 +568,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
}

return &BeaconStateDeneb{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -629,10 +614,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -760,7 +742,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
}

return &BeaconStateElectra{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -815,10 +797,7 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
- srcHr, err := st.HistoricalRoots()
- if err != nil {
- return nil, err
- }
+ srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
@@ -944,9 +923,16 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
if err != nil {
return nil, err
}

+ srcLookahead, err := st.ProposerLookahead()
+ if err != nil {
+ return nil, err
+ }
+ lookahead := make([]string, len(srcLookahead))
+ for i, v := range srcLookahead {
+ lookahead[i] = fmt.Sprintf("%d", uint64(v))
+ }
return &BeaconStateFulu{
- GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
+ GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -983,5 +969,6 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
+ ProposerLookahead: lookahead,
}, nil
}
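Two state-API changes drive every hunk in this file: st.HistoricalRoots() apparently no longer returns an error, and st.GenesisTime() now appears to return a time.Time rather than a uint64, hence the .Unix() call before formatting. A sketch of the formatting consequence, with the accessor shapes inferred from the diff rather than copied from Prysm:

package example

import (
	"fmt"
	"time"
)

// Assumed new accessor shapes, inferred from the diff.
type state struct {
	genesisTime     time.Time
	historicalRoots [][]byte
}

func (s *state) GenesisTime() time.Time    { return s.genesisTime }
func (s *state) HistoricalRoots() [][]byte { return s.historicalRoots } // no error anymore

func genesisTimeString(s *state) string {
	// %d applied to a time.Time would print the struct's internals;
	// Unix() recovers the epoch-seconds integer the API serializes.
	return fmt.Sprintf("%d", s.GenesisTime().Unix())
}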
@@ -4,7 +4,9 @@ import (
"testing"

eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
+ "github.com/ethereum/go-ethereum/common/hexutil"
)

func TestDepositSnapshotFromConsensus(t *testing.T) {
@@ -102,12 +104,266 @@ func TestProposerSlashing_ToConsensus(t *testing.T) {
require.ErrorContains(t, errNilValue.Error(), err)
}

func TestProposerSlashing_FromConsensus(t *testing.T) {
input := []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 2,
ParentRoot: []byte{3},
StateRoot: []byte{4},
BodyRoot: []byte{5},
},
Signature: []byte{6},
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 7,
ProposerIndex: 8,
ParentRoot: []byte{9},
StateRoot: []byte{10},
BodyRoot: []byte{11},
},
Signature: []byte{12},
},
},
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 13,
ProposerIndex: 14,
ParentRoot: []byte{15},
StateRoot: []byte{16},
BodyRoot: []byte{17},
},
Signature: []byte{18},
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 19,
ProposerIndex: 20,
ParentRoot: []byte{21},
StateRoot: []byte{22},
BodyRoot: []byte{23},
},
Signature: []byte{24},
},
},
}

expectedResult := []*ProposerSlashing{
{
SignedHeader1: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: "1",
ProposerIndex: "2",
ParentRoot: hexutil.Encode([]byte{3}),
StateRoot: hexutil.Encode([]byte{4}),
BodyRoot: hexutil.Encode([]byte{5}),
},
Signature: hexutil.Encode([]byte{6}),
},
SignedHeader2: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: "7",
ProposerIndex: "8",
ParentRoot: hexutil.Encode([]byte{9}),
StateRoot: hexutil.Encode([]byte{10}),
BodyRoot: hexutil.Encode([]byte{11}),
},
Signature: hexutil.Encode([]byte{12}),
},
},
{
SignedHeader1: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: "13",
ProposerIndex: "14",
ParentRoot: hexutil.Encode([]byte{15}),
StateRoot: hexutil.Encode([]byte{16}),
BodyRoot: hexutil.Encode([]byte{17}),
},
Signature: hexutil.Encode([]byte{18}),
},
SignedHeader2: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: "19",
ProposerIndex: "20",
ParentRoot: hexutil.Encode([]byte{21}),
StateRoot: hexutil.Encode([]byte{22}),
BodyRoot: hexutil.Encode([]byte{23}),
},
Signature: hexutil.Encode([]byte{24}),
},
},
}

result := ProposerSlashingsFromConsensus(input)
assert.DeepEqual(t, expectedResult, result)
}

func TestAttesterSlashing_ToConsensus(t *testing.T) {
a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}

func TestAttesterSlashing_FromConsensus(t *testing.T) {
input := []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1, 2},
Data: &eth.AttestationData{
Slot: 3,
CommitteeIndex: 4,
BeaconBlockRoot: []byte{5},
Source: &eth.Checkpoint{
Epoch: 6,
Root: []byte{7},
},
Target: &eth.Checkpoint{
Epoch: 8,
Root: []byte{9},
},
},
Signature: []byte{10},
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{11, 12},
Data: &eth.AttestationData{
Slot: 13,
CommitteeIndex: 14,
BeaconBlockRoot: []byte{15},
Source: &eth.Checkpoint{
Epoch: 16,
Root: []byte{17},
},
Target: &eth.Checkpoint{
Epoch: 18,
Root: []byte{19},
},
},
Signature: []byte{20},
},
},
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{21, 22},
Data: &eth.AttestationData{
Slot: 23,
CommitteeIndex: 24,
BeaconBlockRoot: []byte{25},
Source: &eth.Checkpoint{
Epoch: 26,
Root: []byte{27},
},
Target: &eth.Checkpoint{
Epoch: 28,
Root: []byte{29},
},
},
Signature: []byte{30},
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{31, 32},
Data: &eth.AttestationData{
Slot: 33,
CommitteeIndex: 34,
BeaconBlockRoot: []byte{35},
Source: &eth.Checkpoint{
Epoch: 36,
Root: []byte{37},
},
Target: &eth.Checkpoint{
Epoch: 38,
Root: []byte{39},
},
},
Signature: []byte{40},
},
},
}

expectedResult := []*AttesterSlashing{
{
Attestation1: &IndexedAttestation{
AttestingIndices: []string{"1", "2"},
Data: &AttestationData{
Slot: "3",
CommitteeIndex: "4",
BeaconBlockRoot: hexutil.Encode([]byte{5}),
Source: &Checkpoint{
Epoch: "6",
Root: hexutil.Encode([]byte{7}),
},
Target: &Checkpoint{
Epoch: "8",
Root: hexutil.Encode([]byte{9}),
},
},
Signature: hexutil.Encode([]byte{10}),
},
Attestation2: &IndexedAttestation{
AttestingIndices: []string{"11", "12"},
Data: &AttestationData{
Slot: "13",
CommitteeIndex: "14",
BeaconBlockRoot: hexutil.Encode([]byte{15}),
Source: &Checkpoint{
Epoch: "16",
Root: hexutil.Encode([]byte{17}),
},
Target: &Checkpoint{
Epoch: "18",
Root: hexutil.Encode([]byte{19}),
},
},
Signature: hexutil.Encode([]byte{20}),
},
},
{
Attestation1: &IndexedAttestation{
AttestingIndices: []string{"21", "22"},
Data: &AttestationData{
Slot: "23",
CommitteeIndex: "24",
BeaconBlockRoot: hexutil.Encode([]byte{25}),
Source: &Checkpoint{
Epoch: "26",
Root: hexutil.Encode([]byte{27}),
},
Target: &Checkpoint{
Epoch: "28",
Root: hexutil.Encode([]byte{29}),
},
},
Signature: hexutil.Encode([]byte{30}),
},
Attestation2: &IndexedAttestation{
AttestingIndices: []string{"31", "32"},
Data: &AttestationData{
Slot: "33",
CommitteeIndex: "34",
BeaconBlockRoot: hexutil.Encode([]byte{35}),
Source: &Checkpoint{
Epoch: "36",
Root: hexutil.Encode([]byte{37}),
},
Target: &Checkpoint{
Epoch: "38",
Root: hexutil.Encode([]byte{39}),
},
},
Signature: hexutil.Encode([]byte{40}),
},
},
}

result := AttesterSlashingsFromConsensus(input)
assert.DeepEqual(t, expectedResult, result)
}

func TestIndexedAttestation_ToConsensus(t *testing.T) {
a := &IndexedAttestation{
AttestingIndices: []string{"1"},
@@ -25,6 +25,13 @@ type BlockGossipEvent struct {
|
||||
Block string `json:"block"`
|
||||
}
|
||||
|
||||
type DataColumnGossipEvent struct {
|
||||
Slot string `json:"slot"`
|
||||
Index string `json:"index"`
|
||||
BlockRoot string `json:"block_root"`
|
||||
KzgCommitments []string `json:"kzg_commitments"`
|
||||
}
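Like the other gossip event payloads in this file, the new data-column event serializes everything as flat JSON strings. A sketch of what a subscriber would decode, with illustrative placeholder values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type DataColumnGossipEvent struct {
	Slot           string   `json:"slot"`
	Index          string   `json:"index"`
	BlockRoot      string   `json:"block_root"`
	KzgCommitments []string `json:"kzg_commitments"`
}

func main() {
	// Values below are placeholders, not real roots or commitments.
	b, _ := json.Marshal(DataColumnGossipEvent{
		Slot:           "123",
		Index:          "4",
		BlockRoot:      "0x0102",
		KzgCommitments: []string{"0xc0ff"},
	})
	fmt.Println(string(b))
}
```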

type AggregatedAttEventSource struct {
    Aggregate *Attestation `json:"aggregate"`
}

@@ -25,16 +25,24 @@ type Identity struct {
}

type Metadata struct {
-    SeqNumber string `json:"seq_number"`
-    Attnets   string `json:"attnets"`
+    SeqNumber         string `json:"seq_number"`
+    Attnets           string `json:"attnets"`
+    Syncnets          string `json:"syncnets"`
+    CustodyGroupCount string `json:"custody_group_count"`
}

type GetPeerResponse struct {
    Data *Peer `json:"data"`
}

+// Added Meta to align with beacon-api: https://ethereum.github.io/beacon-APIs/#/Node/getPeers
+type Meta struct {
+    Count int `json:"count"`
+}

type GetPeersResponse struct {
    Data []*Peer `json:"data"`
+    Meta Meta    `json:"meta"`
}
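Per the linked beacon-API endpoint, the peers listing carries a `meta` object with the total peer count alongside the data array. A sketch of the wire shape these types decode (the `Peer` struct is trimmed to one field for illustration; the real one has more):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Peer is trimmed for the sketch; only peer_id is shown.
type Peer struct {
	PeerID string `json:"peer_id"`
}

type Meta struct {
	Count int `json:"count"`
}

type GetPeersResponse struct {
	Data []*Peer `json:"data"`
	Meta Meta    `json:"meta"`
}

func main() {
	b, _ := json.Marshal(GetPeersResponse{
		Data: []*Peer{{PeerID: "16Uiu2HAm-placeholder"}}, // placeholder ID
		Meta: Meta{Count: 1},
	})
	fmt.Println(string(b)) // {"data":[{"peer_id":"16Uiu2HAm-placeholder"}],"meta":{"count":1}}
}
```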

type Peer struct {

@@ -253,7 +253,7 @@ type PendingDeposit struct {
}

type PendingPartialWithdrawal struct {
-    Index             string `json:"index"`
+    Index             string `json:"validator_index"`
    Amount            string `json:"amount"`
    WithdrawableEpoch string `json:"withdrawable_epoch"`
}

@@ -219,4 +219,5 @@ type BeaconStateFulu struct {
    PendingDeposits           []*PendingDeposit           `json:"pending_deposits"`
    PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
    PendingConsolidations     []*PendingConsolidation     `json:"pending_consolidations"`
+    ProposerLookahead         []string                    `json:"proposer_lookahead"`
}
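The new `proposer_lookahead` field, like everything else in these API state types, is a slice of string-encoded numbers; reading it as deterministic proposer lookahead per EIP-7917 is our interpretation, not something this diff states. A sketch of decoding it back into validator indices:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Illustrative values only; real responses carry one index per
	// lookahead slot.
	lookahead := []string{"12", "7", "90"}
	indices := make([]uint64, len(lookahead))
	for i, s := range lookahead {
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			panic(err)
		}
		indices[i] = v
	}
	fmt.Println(indices) // [12 7 90]
}
```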

@@ -15,7 +15,7 @@ import (

func TestDebounce_NoEvents(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
    interval := time.Second
    timesHandled := int32(0)
    wg := &sync.WaitGroup{}

@@ -39,7 +39,7 @@ func TestDebounce_NoEvents(t *testing.T) {

func TestDebounce_CtxClosing(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
    interval := time.Second
    timesHandled := int32(0)
    wg := &sync.WaitGroup{}

@@ -75,7 +75,7 @@ func TestDebounce_CtxClosing(t *testing.T) {

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
    interval := time.Second
    timesHandled := int32(0)
    go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {

@@ -93,7 +93,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {

func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
    interval := time.Second
    timesHandled := int32(0)
    go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {

@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
    for {
        select {
        case <-ticker.C:
-            log.WithField("function", funcName).Trace("running")
+            log.WithField("function", funcName).Trace("Running")
            f()
        case <-ctx.Done():
-            log.WithField("function", funcName).Debug("context is closed, exiting")
+            log.WithField("function", funcName).Debug("Context is closed, exiting")
            ticker.Stop()
            return
        }

@@ -10,7 +10,7 @@ import (
)

func TestEveryRuns(t *testing.T) {
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())

    i := int32(0)
    async.RunEvery(ctx, 100*time.Millisecond, func() {

@@ -4,6 +4,6 @@ This is the main project folder for the beacon chain implementation of Ethereum

You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.

-[](https://discord.gg/CTYGPUJ)
+[](https://discord.gg/prysm)

Also, read the official beacon chain [specification](https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.

@@ -25,6 +25,7 @@ go_library(
    "receive_attestation.go",
    "receive_blob.go",
    "receive_block.go",
+    "receive_data_column.go",
    "service.go",
    "setup_forchoice.go",
    "tracked_proposer.go",

@@ -50,6 +51,7 @@ go_library(
    "//beacon-chain/core/feed/state:go_default_library",
    "//beacon-chain/core/helpers:go_default_library",
    "//beacon-chain/core/light-client:go_default_library",
+    "//beacon-chain/core/peerdas:go_default_library",
    "//beacon-chain/core/signing:go_default_library",
    "//beacon-chain/core/time:go_default_library",
    "//beacon-chain/core/transition:go_default_library",

@@ -146,6 +148,7 @@ go_test(
    "//beacon-chain/core/feed/state:go_default_library",
    "//beacon-chain/core/helpers:go_default_library",
    "//beacon-chain/core/light-client:go_default_library",
+    "//beacon-chain/core/peerdas:go_default_library",
    "//beacon-chain/core/signing:go_default_library",
    "//beacon-chain/core/transition:go_default_library",
    "//beacon-chain/das:go_default_library",

@@ -163,6 +166,7 @@ go_test(
    "//beacon-chain/operations/slashings:go_default_library",
    "//beacon-chain/operations/voluntaryexits:go_default_library",
    "//beacon-chain/p2p:go_default_library",
+    "//beacon-chain/p2p/testing:go_default_library",
    "//beacon-chain/startup:go_default_library",
    "//beacon-chain/state:go_default_library",
    "//beacon-chain/state/state-native:go_default_library",

@@ -177,6 +181,7 @@ go_test(
    "//container/trie:go_default_library",
    "//crypto/bls:go_default_library",
    "//encoding/bytesutil:go_default_library",
+    "//genesis:go_default_library",
    "//proto/engine/v1:go_default_library",
    "//proto/eth/v1:go_default_library",
    "//proto/prysm/v1alpha1:go_default_library",

@@ -41,7 +41,7 @@ type ForkchoiceFetcher interface {
    Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
    CachedHeadRoot() [32]byte
    GetProposerHead() [32]byte
-    SetForkChoiceGenesisTime(uint64)
+    SetForkChoiceGenesisTime(time.Time)
    UpdateHead(context.Context, primitives.Slot)
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)

@@ -514,7 +514,7 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo

// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
-    s.genesisTime = t
+    s.genesisTime = t.Truncate(time.Second) // Genesis time has a precision of 1 second.
}
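Truncating the stored genesis time matters because slot boundaries are derived by adding whole-second multiples of SECONDS_PER_SLOT to genesis, so a stray sub-second component would skew every derived slot start. A minimal illustration of the invariant (the constant and helper are local to this sketch, not Prysm's API):

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet value, hard-coded for the sketch

// slotStart derives a slot's wall-clock start from genesis, mirroring the
// second-precision truncation applied in SetGenesisTime above.
func slotStart(genesis time.Time, slot uint64) time.Time {
	genesis = genesis.Truncate(time.Second) // genesis has 1s precision
	return genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
}

func main() {
	g := time.Unix(1606824023, 500_000_000) // note the stray 500ms
	fmt.Println(slotStart(g, 1))            // exactly 12s after truncated genesis
}
```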

func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {

@@ -2,6 +2,7 @@ package blockchain

import (
    "context"
+    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    consensus_blocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"

@@ -27,7 +28,7 @@ func (s *Service) GetProposerHead() [32]byte {
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
-func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
+func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()
    s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)

@@ -1,12 +1,8 @@
package blockchain

import (
-    "context"
    "testing"

-    testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
-    doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

@@ -15,82 +11,71 @@ import (
)

func TestHeadSlot_DataRace(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    s := &Service{
-        cfg: &config{BeaconDB: beaconDB},
-    }
+    s := testServiceWithDB(t)
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 1)
    wait := make(chan struct{})
    go func() {
        defer close(wait)
-        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+        require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
    }()
    s.HeadSlot()
    <-wait
}
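These tests now build the service through `testServiceWithDB` / `testServiceNoDB` fixtures instead of hand-rolled `&Service{...}` literals. The helpers' bodies are not shown in this diff; what follows is a plausible reconstruction of the pattern only, with stand-in types, not Prysm's real wiring (the real helpers also take `*testing.T` and set up a test DB, stategen, and forkchoice store):

```go
package main

import "fmt"

// Stand-ins for Prysm's Service/config; illustrative only.
type config struct {
	hasDB bool
}
type Service struct {
	cfg *config
}

// One small constructor per configuration keeps each test's setup uniform
// and gives the fixture a single place to own cleanup.
func testServiceWithDB() *Service { return &Service{cfg: &config{hasDB: true}} }
func testServiceNoDB() *Service   { return &Service{cfg: &config{}} }

func main() {
	fmt.Println(testServiceWithDB().cfg.hasDB, testServiceNoDB().cfg.hasDB) // true false
}
```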

func TestHeadRoot_DataRace(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    s := &Service{
-        cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
-        head: &head{root: [32]byte{'A'}},
-    }
+    s := testServiceWithDB(t)
+    s.head = &head{root: [32]byte{'A'}}
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
-        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+        require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
    }()
-    _, err = s.HeadRoot(context.Background())
+    _, err = s.HeadRoot(t.Context())
    require.NoError(t, err)
    <-wait
}

func TestHeadBlock_DataRace(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
    wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
    require.NoError(t, err)
-    s := &Service{
-        cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
-        head: &head{block: wsb},
-    }
+    s := testServiceWithDB(t)
+    s.head = &head{block: wsb}
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
-        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+        require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
    }()
-    _, err = s.HeadBlock(context.Background())
+    _, err = s.HeadBlock(t.Context())
    require.NoError(t, err)
    <-wait
}

func TestHeadState_DataRace(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    s := &Service{
-        cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
-    }
+    s := testServiceWithDB(t)
+    beaconDB := s.cfg.BeaconDB
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
-    require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
-    require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
+    require.NoError(t, beaconDB.SaveGenesisBlockRoot(t.Context(), root))
+    require.NoError(t, beaconDB.SaveState(t.Context(), st, root))
    go func() {
        defer close(wait)
-        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+        require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
    }()
-    _, err = s.HeadState(context.Background())
+    _, err = s.HeadState(t.Context())
    require.NoError(t, err)
    <-wait
}

@@ -6,7 +6,6 @@ import (
    "time"

    testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
-    doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
    forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"

@@ -15,6 +14,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+    "github.com/OffchainLabs/prysm/v6/genesis"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -84,7 +84,7 @@ func prepareForkchoiceState(
func TestHeadRoot_Nil(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    c := setupBeaconChain(t, beaconDB)
-    headRoot, err := c.HeadRoot(context.Background())
+    headRoot, err := c.HeadRoot(t.Context())
    require.NoError(t, err)
    assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}

@@ -137,8 +137,8 @@ func TestFinalizedBlockHash(t *testing.T) {
}

func TestUnrealizedJustifiedBlockHash(t *testing.T) {
-    ctx := context.Background()
-    service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
+    ctx := t.Context()
+    service := testServiceWithDB(t)
    ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
    ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -153,7 +153,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
-    c := &Service{}
+    c := testServiceNoDB(t)
    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
    require.NoError(t, err)
    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())

@@ -200,10 +200,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
    require.NoError(t, err)
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{block: wsb, state: s}

-    received, err := c.HeadBlock(context.Background())
+    received, err := c.HeadBlock(t.Context())
    require.NoError(t, err)
    pb, err := received.Proto()
    require.NoError(t, err)

@@ -213,15 +213,16 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
func TestHeadState_CanRetrieve(t *testing.T) {
    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
    require.NoError(t, err)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{state: s}
-    headState, err := c.HeadState(context.Background())
+    headState, err := c.HeadState(t.Context())
    require.NoError(t, err)
    assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
-    c := &Service{genesisTime: time.Unix(999, 0)}
+    c := testServiceNoDB(t)
+    c.genesisTime = time.Unix(999, 0)
    wanted := time.Unix(999, 0)
    assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}

@@ -230,7 +231,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
    f := &ethpb.Fork{Epoch: 999}
    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
    require.NoError(t, err)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{state: s}
    if !proto.Equal(c.CurrentFork(), f) {
        t.Error("Received incorrect fork version")

@@ -242,7 +243,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
        PreviousVersion: params.BeaconConfig().GenesisForkVersion,
        CurrentVersion: params.BeaconConfig().GenesisForkVersion,
    }
-    c := &Service{}
+    c := testServiceNoDB(t)
    if !proto.Equal(c.CurrentFork(), f) {
        t.Error("Received incorrect fork version")
    }

@@ -250,7 +251,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {

func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
    // Should not panic if head state is nil.
-    c := &Service{}
+    c := testServiceNoDB(t)
    assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")

    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})

@@ -269,7 +270,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
    d := &ethpb.Eth1Data{DepositCount: 999}
    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
    require.NoError(t, err)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{state: s}
    if !proto.Equal(c.HeadETH1Data(), d) {
        t.Error("Received incorrect eth1 data")

@@ -277,7 +278,7 @@
}

func TestIsCanonical_Ok(t *testing.T) {
-    ctx := context.Background()
+    ctx := t.Context()
    beaconDB := testDB.SetupDB(t)
    c := setupBeaconChain(t, beaconDB)

@@ -298,22 +299,22 @@ func TestIsCanonical_Ok(t *testing.T) {

func TestService_HeadValidatorsIndices(t *testing.T) {
    s, _ := util.DeterministicGenesisState(t, 10)
-    c := &Service{}
+    c := testServiceNoDB(t)

    c.head = &head{}
-    indices, err := c.HeadValidatorsIndices(context.Background(), 0)
+    indices, err := c.HeadValidatorsIndices(t.Context(), 0)
    require.NoError(t, err)
    require.Equal(t, 0, len(indices))

    c.head = &head{state: s}
-    indices, err = c.HeadValidatorsIndices(context.Background(), 0)
+    indices, err = c.HeadValidatorsIndices(t.Context(), 0)
    require.NoError(t, err)
    require.Equal(t, 10, len(indices))
}

func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
    s, _ := util.DeterministicGenesisState(t, 1)
-    c := &Service{}
+    c := testServiceNoDB(t)

    c.head = &head{}
    root := c.HeadGenesisValidatorsRoot()

@@ -331,8 +332,8 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
// ---------- D

func TestService_ChainHeads(t *testing.T) {
-    ctx := context.Background()
-    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -366,7 +367,7 @@ func TestService_ChainHeads(t *testing.T) {

func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
    s, _ := util.DeterministicGenesisState(t, 10)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{state: s}

    _, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})

@@ -381,7 +382,7 @@
}

func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = nil

    idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})

@@ -396,10 +397,10 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {

func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
    s, _ := util.DeterministicGenesisState(t, 10)
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = &head{state: s}

-    p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
+    p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
    require.NoError(t, err)

    v, err := s.ValidatorAtIndex(0)

@@ -409,15 +410,15 @@
}

func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
-    c := &Service{}
+    c := testServiceNoDB(t)
    c.head = nil

-    p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
+    p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
    require.NoError(t, err)
    require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)

    c.head = &head{state: nil}
-    p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
+    p, err = c.HeadValidatorIndexToPublicKey(t.Context(), 0)
    require.NoError(t, err)
    require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}

@@ -428,10 +429,12 @@ func TestService_IsOptimistic(t *testing.T) {
    cfg.BellatrixForkEpoch = 0
    params.OverrideBeaconConfig(cfg)

-    ctx := context.Background()
+    ctx := t.Context()
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+    c := testServiceWithDB(t)
+    c.SetGenesisTime(time.Now())
+    c.head = &head{root: [32]byte{'b'}}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -450,22 +453,24 @@ func TestService_IsOptimistic(t *testing.T) {
    require.Equal(t, true, opt)

    // If head is nil, for some reason, an error should be returned rather than panic.
-    c = &Service{}
+    c = testServiceNoDB(t)
    _, err = c.IsOptimistic(ctx)
    require.ErrorIs(t, err, ErrNilHead)
}

func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
-    ctx := context.Background()
-    c := &Service{genesisTime: time.Now()}
+    ctx := t.Context()
+    c := testServiceNoDB(t)
+    c.genesisTime = time.Now()
    opt, err := c.IsOptimistic(ctx)
    require.NoError(t, err)
    require.Equal(t, false, opt)
}

func TestService_IsOptimisticForRoot(t *testing.T) {
-    ctx := context.Background()
-    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
+    c.head = &head{root: [32]byte{'b'}}
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -481,28 +486,29 @@
}

func TestService_IsOptimisticForRoot_DB(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    ctx := context.Background()
-    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
+    c.head = &head{root: [32]byte{'b'}}
+    beaconDB := c.cfg.BeaconDB
    c.head = &head{root: params.BeaconConfig().ZeroHash}
    b := util.NewBeaconBlock()
    b.Block.Slot = 10
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, b)
-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
+    util.SaveBlock(t, t.Context(), beaconDB, b)
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

    optimisticBlock := util.NewBeaconBlock()
    optimisticBlock.Block.Slot = 97
    optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
+    util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)

    validatedBlock := util.NewBeaconBlock()
    validatedBlock.Block.Slot = 9
    validatedRoot, err := validatedBlock.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
+    util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)

    validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

@@ -524,48 +530,48 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
    // Before the first finalized epoch, finalized root could be zeros.
    validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)
}

func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    ctx := context.Background()
-    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
+    beaconDB := c.cfg.BeaconDB
+    c.head = &head{root: params.BeaconConfig().ZeroHash}
    b := util.NewBeaconBlock()
    b.Block.Slot = 10
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, b)
-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
+    util.SaveBlock(t, t.Context(), beaconDB, b)
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

    optimisticBlock := util.NewBeaconBlock()
    optimisticBlock.Block.Slot = 97
    optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
+    util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)

    validatedBlock := util.NewBeaconBlock()
    validatedBlock.Block.Slot = 9
    validatedRoot, err := validatedBlock.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
+    util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)

    validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)

-    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
+    require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
    validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
    require.NoError(t, err)
    require.Equal(t, true, validated)

@@ -573,15 +579,15 @@
}

func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    ctx := context.Background()
-    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
+    beaconDB := c.cfg.BeaconDB
+    c.head = &head{root: params.BeaconConfig().ZeroHash}
    b := util.NewBeaconBlock()
    b.Block.Slot = 10
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
-    util.SaveBlock(t, context.Background(), beaconDB, b)
+    util.SaveBlock(t, t.Context(), beaconDB, b)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
    _, err = c.IsOptimisticForRoot(ctx, br)
    assert.NoError(t, err)

@@ -593,9 +599,9 @@
}

func TestService_IsFinalized(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    ctx := context.Background()
-    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
+    ctx := t.Context()
+    c := testServiceWithDB(t)
+    beaconDB := c.cfg.BeaconDB
    r1 := [32]byte{'a'}
    require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
        Root: r1,

@@ -616,9 +622,10 @@ func TestService_IsFinalized(t *testing.T) {

func Test_hashForGenesisRoot(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
-    ctx := context.Background()
+    ctx := t.Context()
    c := setupBeaconChain(t, beaconDB)
    st, _ := util.DeterministicGenesisStateElectra(t, 10)
+    genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
    require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
    root, err := beaconDB.GenesisBlockRoot(ctx)
    require.NoError(t, err)

@@ -2,7 +2,6 @@ package blockchain

import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
-    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/time"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"

@@ -17,9 +16,6 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
// a higher number of fragmented indexes are reallocated to a new separate slice for
// that field.
func (s *Service) defragmentState(st state.BeaconState) {
-    if !features.Get().EnableExperimentalState {
-        return
-    }
    startTime := time.Now()
    st.Defragment()
    elapsedTime := time.Since(startTime)

@@ -40,10 +40,12 @@ var (
    errNotGenesisRoot = errors.New("root is not the genesis block root")
    // errBlacklistedBlock is returned when a block is blacklisted as invalid.
    errBlacklistedRoot = verification.AsVerificationFailure(errors.New("block root is blacklisted"))
+    // errMaxBlobsExceeded is returned when the number of blobs in a block exceeds the maximum allowed.
+    errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
+    // errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
+    errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
)

-var errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
// Some examples of invalid blocks are:

@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
    }

    if err := s.saveHead(ctx, r, b, st); err != nil {
-        log.WithError(err).Error("could not save head after pruning invalid blocks")
+        log.WithError(err).Error("Could not save head after pruning invalid blocks")
    }

    log.WithFields(logrus.Fields{

@@ -354,7 +354,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
    }

    // Get timestamp.
-    t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
+    t, err := slots.StartTime(s.genesisTime, slot)
    if err != nil {
        log.WithError(err).Error("Could not get timestamp to get payload attribute")
        return emptyAttri

@@ -439,6 +439,9 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
            // Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
            log.WithError(err).Debug("Could not remove blob from blob storage")
        }
+        if err := s.dataColumnStorage.Remove(root); err != nil {
+            log.WithError(err).Errorf("Could not remove data columns from data column storage for root %#x", root)
+        }
    }
    return nil
}
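The new error variables above and this cleanup hunk are two halves of the same data-column handling: reject blocks that advertise more sidecars than the protocol allows, and best-effort delete whatever was stored for roots later found invalid. A hedged sketch of the kind of bounds check those errors imply; the constants and function are placeholders, not Prysm's real configuration or API:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errMaxBlobsExceeded       = errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
	errMaxDataColumnsExceeded = errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS")
)

// checkSidecarBounds is illustrative only: real limits come from the beacon
// config and the node's custody assignment, not these hard-coded constants.
func checkSidecarBounds(commitments, columns int) error {
	const maxBlobsPerBlock = 6  // placeholder
	const numberOfColumns = 128 // placeholder
	if commitments > maxBlobsPerBlock {
		return errMaxBlobsExceeded
	}
	if columns > numberOfColumns {
		return errMaxDataColumnsExceeded
	}
	return nil
}

func main() {
	fmt.Println(checkSidecarBounds(7, 64)) // blob-count violation
}
```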

@@ -19,6 +19,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+    "github.com/OffchainLabs/prysm/v6/genesis"
    v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
        block: wba,
    }

+    genesis.StoreStateDuringTest(t, st)
    require.NoError(t, beaconDB.SaveState(ctx, st, bra))
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
    a := &fcuConfig{

@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, err)

    bState, _ := util.DeterministicGenesisState(t, 10)
+    genesis.StoreStateDuringTest(t, bState)
    require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)

@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
-        log.WithError(err).Error("could not compute payload attributes")
+        log.WithError(err).Error("Could not compute payload attributes")
        return
    }
    if fcuArgs.attributes.IsEmpty() {
        return
    }
    if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
-        log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
+        log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
    }
}

@@ -99,7 +99,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
    }

    if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
-        log.WithError(err).Error("could not save head")
+        log.WithError(err).Error("Could not save head")
    }

    go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)

@@ -114,7 +114,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
    headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
    if err != nil {
-        log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
+        log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
    }
    currentSlot := s.CurrentSlot()
    if proposingSlot == currentSlot {

@@ -132,17 +132,17 @@
    if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
        return true
    }
-    secs, err := slots.SecondsSinceSlotStart(currentSlot,
-        uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
+    sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
    if err != nil {
-        log.WithError(err).Error("could not compute seconds since slot start")
+        log.WithError(err).Error("Could not compute seconds since slot start")
    }
-    if secs >= doublylinkedtree.ProcessAttestationsThreshold {
+    if sss >= doublylinkedtree.ProcessAttestationsThreshold {
        log.WithFields(logrus.Fields{
-            "root": fmt.Sprintf("%#x", newHeadRoot),
-            "weight": headWeight,
-        }).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
-            doublylinkedtree.ProcessAttestationsThreshold)
+            "root": fmt.Sprintf("%#x", newHeadRoot),
+            "weight": headWeight,
+            "sinceSlotStart": sss,
+            "threshold": doublylinkedtree.ProcessAttestationsThreshold,
+        }).Info("Attempted late block reorg aborted due to attestations after threshold")
        lateBlockFailedAttemptFirstThreshold.Inc()
    }
}
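The late-block-reorg override now measures elapsed time through a time.Time-based helper rather than raw unix-second arithmetic, which sidesteps conversions through uint64. A minimal sketch of the computation such a helper performs; the signature and return type are our assumption, not necessarily those of the real slots.SinceSlotStart:

```go
package main

import (
	"fmt"
	"time"
)

// sinceSlotStart reports how far into `slot` the clock `now` is, given a
// second-precision genesis time. Mirrors the old seconds math in
// time.Duration form.
func sinceSlotStart(slot uint64, genesis, now time.Time) time.Duration {
	const secondsPerSlot = 12 // mainnet value, hard-coded for the sketch
	start := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	return now.Sub(start)
}

func main() {
	genesis := time.Now().Add(-30 * time.Second)
	fmt.Println(sinceSlotStart(2, genesis, time.Now())) // roughly 6s into slot 2
}
```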

@@ -1,7 +1,6 @@
package blockchain

import (
-    "context"
    "testing"
    "time"

@@ -36,29 +35,29 @@ func TestService_isNewHead(t *testing.T) {
func TestService_getHeadStateAndBlock(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    service := setupBeaconChain(t, beaconDB)
-    _, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
+    _, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
    require.ErrorContains(t, "block does not exist", err)

    blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
    require.NoError(t, err)
-    require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
+    require.NoError(t, service.cfg.BeaconDB.SaveBlock(t.Context(), blk))

    st, _ := util.DeterministicGenesisState(t, 1)
    r, err := blk.Block().HashTreeRoot()
    require.NoError(t, err)
-    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
+    require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), st, r))

-    gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
+    gotState, err := service.cfg.BeaconDB.State(t.Context(), r)
    require.NoError(t, err)
    require.DeepEqual(t, st.ToProto(), gotState.ToProto())

-    gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
+    gotBlk, err := service.cfg.BeaconDB.Block(t.Context(), r)
    require.NoError(t, err)
    require.DeepEqual(t, blk, gotBlk)
}

func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
-    ctx := context.Background()
+    ctx := t.Context()
    opts := testServiceOptsWithDB(t)

    service, err := NewService(ctx, opts...)

@@ -161,6 +160,7 @@ func TestShouldOverrideFCU(t *testing.T) {
    ctx, fcs := tr.ctx, tr.fcs

    service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
+    fcs.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
    headRoot := [32]byte{'b'}
    parentRoot := [32]byte{'a'}
    ojc := &ethpb.Checkpoint{}

@@ -181,11 +181,12 @@ func TestShouldOverrideFCU(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, headRoot, head)

-    fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
+    wantLog := "aborted due to attestations after threshold"
+    fcs.SetGenesisTime(time.Now().Add(-29 * time.Second))
    require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
-    require.LogsDoNotContain(t, hook, "10 seconds")
-    fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
+    require.LogsDoNotContain(t, hook, wantLog)
+    fcs.SetGenesisTime(time.Now().Add(-24 * time.Second))
+    service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
    require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
-    require.LogsContain(t, hook, "10 seconds")
+    require.LogsContain(t, hook, wantLog)
}

@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
    oldHeadRoot := bytesutil.ToBytes32(r)
    isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
    if err != nil {
-        log.WithError(err).Error("could not check if node is optimistically synced")
+        log.WithError(err).Error("Could not check if node is optimistically synced")
    }
    if headBlock.Block().ParentRoot() != oldHeadRoot {
        // A chain re-org occurred, so we fire an event notifying the rest of the services.

@@ -111,11 +111,11 @@
        dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
        oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
        if err != nil {
-            log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
+            log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
        }
        newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
        if err != nil {
-            log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
+            log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
        }
        log.WithFields(logrus.Fields{
            "newSlot": fmt.Sprintf("%d", newHeadSlot),

@@ -18,9 +18,6 @@ import (
    "github.com/pkg/errors"
)

-// Initialize the state cache for sync committees.
-var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()

// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
type HeadSyncCommitteeFetcher interface {

@@ -143,7 +140,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
    defer mLock.Unlock()

    // If there's already a head state exists with the request slot, we don't need to process slots.
-    cachedState, err := syncCommitteeHeadStateCache.Get(slot)
+    cachedState, err := s.syncCommitteeHeadState.Get(slot)
    switch {
    case err == nil:
        syncHeadStateHit.Inc()

@@ -166,7 +163,7 @@
        return nil, err
    }
    syncHeadStateMiss.Inc()
-    err = syncCommitteeHeadStateCache.Put(slot, headState)
+    err = s.syncCommitteeHeadState.Put(slot, headState)
    return headState, err
    default:
        // In the event, we encounter another error

@@ -1,12 +1,10 @@
package blockchain

import (
-    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
-    dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/testing/require"

@@ -16,35 +14,35 @@

func TestService_HeadSyncCommitteeIndices(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    // Current period
    slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
-    a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
+    a, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
    require.NoError(t, err)

    // Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
-    b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
+    b, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
    require.NoError(t, err)
    require.DeepEqual(t, a, b)

    // Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
-    b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
+    b, err = c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
    require.NoError(t, err)
    require.DeepNotEqual(t, a, b)
}

func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
    slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
-    indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
+    indices, err := c.headCurrentSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
    require.NoError(t, err)

    // NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.

@@ -53,12 +51,12 @@

func TestService_headNextSyncCommitteeIndices(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
    slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
-    indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
+    indices, err := c.headNextSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
    require.NoError(t, err)

    // NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.

@@ -67,12 +65,12 @@

func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    // Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
    slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
-    pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
+    pubkeys, err := c.HeadSyncCommitteePubKeys(t.Context(), primitives.Slot(slot), 0)
    require.NoError(t, err)

    // Any subcommittee should match the subcommittee size.

@@ -82,13 +80,13 @@

func TestService_HeadSyncCommitteeDomain(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
    require.NoError(t, err)

-    d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
+    d, err := c.HeadSyncCommitteeDomain(t.Context(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)

@@ -96,13 +94,13 @@

func TestService_HeadSyncContributionProofDomain(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
    require.NoError(t, err)

-    d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
+    d, err := c.HeadSyncContributionProofDomain(t.Context(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)

@@ -110,30 +108,27 @@

func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
    s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
-    c := &Service{}
+    c := testServiceWithDB(t)
    c.head = &head{state: s}

    wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
    require.NoError(t, err)

-    d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
+    d, err := c.HeadSyncSelectionProofDomain(t.Context(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)
}

func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
-    c := syncCommitteeHeadStateCache
-    t.Cleanup(func() {
-        syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
-    })
+    s := testServiceNoDB(t)
    beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
    require.NoError(t, beaconState.SetSlot(100))
-    cachedState, err := c.Get(101)
+    cachedState, err := s.syncCommitteeHeadState.Get(101)
    require.ErrorContains(t, cache.ErrNotFound.Error(), err)
    require.Equal(t, nil, cachedState)
-    require.NoError(t, c.Put(101, beaconState))
-    cachedState, err = c.Get(101)
+    require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
+    cachedState, err = s.syncCommitteeHeadState.Get(101)
    require.NoError(t, err)
    require.DeepEqual(t, beaconState, cachedState)
}
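Moving the sync-committee head-state cache from a package-level variable onto the Service removes the cross-test shared state that the old t.Cleanup reset worked around. The pattern, reduced to a stand-alone sketch with stand-in types:

```go
package main

import "fmt"

// cache is a stand-in for Prysm's SyncCommitteeHeadState cache.
type cache struct {
	m map[uint64]string
}

func newCache() *cache { return &cache{m: make(map[uint64]string)} }

func (c *cache) Put(slot uint64, v string) { c.m[slot] = v }

func (c *cache) Get(slot uint64) (string, bool) {
	v, ok := c.m[slot]
	return v, ok
}

// Service owns its cache, so two instances can no longer observe each
// other's entries the way they could with a package-level variable.
type Service struct {
	syncCommitteeHeadState *cache
}

func main() {
	a, b := &Service{newCache()}, &Service{newCache()}
	a.syncCommitteeHeadState.Put(101, "state@101")
	_, ok := b.syncCommitteeHeadState.Get(101)
	fmt.Println(ok) // false: no shared global state
}
```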
|
||||
|
||||
@@ -9,7 +9,6 @@ import (

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -34,17 +33,17 @@ func TestSaveHead_Same(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
require.NoError(t, service.saveHead(t.Context(), r, b, st))
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}

func TestSaveHead_Different(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -61,7 +60,7 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block

wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
@@ -74,13 +73,13 @@ func TestSaveHead_Different(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))

assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")

cachedRoot, err := service.HeadRoot(context.Background())
cachedRoot, err := service.HeadRoot(t.Context())
require.NoError(t, err)
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
headBlock, err := service.headBlock()
@@ -92,12 +91,12 @@ func TestSaveHead_Different(t *testing.T) {
}

func TestSaveHead_Different_Reorg(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
hook := logTest.NewGlobal()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -120,7 +119,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block

wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
@@ -129,13 +128,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))

assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")

cachedRoot, err := service.HeadRoot(context.Background())
cachedRoot, err := service.HeadRoot(t.Context())
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
@@ -154,20 +153,16 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: [32]byte{1},
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
require.NoError(t, srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))

@@ -185,18 +180,14 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
t.Run("non_genesis_values", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: genesisRoot,
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
srv.originBlockRoot = genesisRoot
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
epoch1Start, err := slots.EpochStart(1)
require.NoError(t, err)
epoch2Start, err := slots.EpochStart(1)
@@ -205,7 +196,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {

newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -225,11 +216,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}

func TestRetrieveHead_ReadOnly(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
service.head = &head{
@@ -243,7 +234,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
@@ -256,9 +247,9 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))

rOnlyState, err := service.HeadStateReadOnly(ctx)
require.NoError(t, err)
@@ -267,7 +258,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
}

func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -333,7 +324,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
}

func TestSaveOrphanedAttsElectra(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -404,10 +395,10 @@ func TestSaveOrphanedOps(t *testing.T) {
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)

ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -481,7 +472,7 @@ func TestSaveOrphanedOps(t *testing.T) {
}

func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
@@ -539,7 +530,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
}

func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -604,7 +595,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
}

func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
@@ -11,7 +10,7 @@ import (
)

func TestService_getBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()
@@ -42,7 +41,7 @@ func TestService_getBlock(t *testing.T) {
}

func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()

@@ -29,8 +29,8 @@ go_test(
embed = [":go_default_library"],
deps = [
"//consensus-types/blocks:go_default_library",
"//crypto/random:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
],
)

@@ -4,8 +4,8 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/random"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
GoKZG "github.com/crate-crypto/go-kzg-4844"
)

@@ -37,7 +37,7 @@ func TestBytesToAny(t *testing.T) {
}

func TestGenerateCommitmentAndProof(t *testing.T) {
blob := util.GetRandBlob(123)
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}

@@ -26,9 +26,6 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
if len(b.Body().Deposits()) > 0 {
log = log.WithField("deposits", len(b.Body().Deposits()))
}
if len(b.Body().AttesterSlashings()) > 0 {
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
}
@@ -89,8 +86,8 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
return nil
}

func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return err
}
@@ -111,7 +108,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {
@@ -159,7 +155,9 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
if len(changes) > 0 {
fields["blsToExecutionChanges"] = len(changes)
}
}
log.WithFields(fields).Debug("Synced new payload")
return nil

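The logPayload change above gates the blsToExecutionChanges field on a non-zero count. A minimal runnable sketch of that pattern, with hypothetical local names that are not taken from this diff:

package main

import "fmt"

func main() {
	fields := map[string]int{}
	changes := make([]string, 0) // a block carrying no BLS-to-execution changes
	// Add the log field only when there is something to report, as logPayload now does.
	if n := len(changes); n > 0 {
		fields["blsToExecutionChanges"] = n
	}
	fmt.Println(fields) // map[] — no zero-valued noise for quiet blocks
}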
@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -93,7 +93,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -9,23 +8,13 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetValidators(nil))
err = reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
}

func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 0}))
err = reportEpochMetrics(context.Background(), s, h)
err = reportEpochMetrics(t.Context(), s, h)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}

@@ -36,6 +25,6 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
v.Slashed = true
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(&eth.AttestationData{})}))
err = reportEpochMetrics(context.Background(), h, h)
err = reportEpochMetrics(t.Context(), h, h)
require.ErrorContains(t, "slot 0 out of bounds", err)
}

@@ -8,9 +8,10 @@ import (
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func testServiceOptsWithDB(t *testing.T) []Option {
func testServiceOptsWithDB(t testing.TB) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
@@ -31,3 +32,15 @@ func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
}

func testServiceNoDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsNoDB()...)
require.NoError(t, err)
return s
}

func testServiceWithDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
require.NoError(t, err)
return s
}

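These helpers, and the context.Background() → t.Context() replacements running through the tests above, rely on the testing.T.Context method. A minimal sketch, assuming Go 1.24+ where t.Context() is available:

package blockchain_test

import "testing"

func TestContextExample(t *testing.T) {
	// t.Context() returns a context that is canceled when the test ends,
	// so goroutines started by the test observe cancellation instead of
	// leaking, which a bare context.Background() cannot provide.
	ctx := t.Context()
	_ = ctx
}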
@@ -1,10 +1,13 @@
package blockchain

import (
"time"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -30,6 +33,14 @@ func WithMaxGoroutines(x int) Option {
}
}

// WithLCStore for light client store access.
func WithLCStore() Option {
return func(s *Service) error {
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
return nil
}
}

// WithWeakSubjectivityCheckpoint for checkpoint sync.
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
return func(s *Service) error {
@@ -127,9 +138,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
s.cfg.P2p = p
s.cfg.P2P = p
return nil
}
}
@@ -208,6 +219,15 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
}
}

// WithDataColumnStorage sets the data column storage backend for the blockchain service.
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
return func(s *Service) error {
s.dataColumnStorage = b
return nil
}
}

// WithSyncChecker sets the sync checker for the blockchain service.
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
s.cfg.SyncChecker = checker
@@ -215,6 +235,15 @@ func WithSyncChecker(checker Checker) Option {
}
}

// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}

// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
s.slasherEnabled = enabled
@@ -222,9 +251,27 @@ func WithSlasherEnabled(enabled bool) Option {
}
}

// WithGenesisTime sets the genesis time for the blockchain service.
func WithGenesisTime(genesisTime time.Time) Option {
return func(s *Service) error {
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
return nil
}
}

// WithLightClientStore sets the light client store for the blockchain service.
func WithLightClientStore(lcs *lightclient.Store) Option {
return func(s *Service) error {
s.lcStore = lcs
return nil
}
}

// WithStartWaitingDataColumnSidecars sets a channel that the `areDataColumnsAvailable` function will fill
// in when starting to wait for additional data columns.
func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return func(s *Service) error {
s.startWaitingDataColumnSidecars = c
return nil
}
}

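All of the With* helpers above follow the same functional-option shape. A self-contained sketch of that pattern; service, option, newService, and withGenesisTime are illustrative stand-ins, not the diff's actual types:

package main

import (
	"fmt"
	"time"
)

type service struct{ genesisTime time.Time }

type option func(*service) error

// withGenesisTime mirrors WithGenesisTime above, including the 1-second truncation.
func withGenesisTime(genesis time.Time) option {
	return func(s *service) error {
		s.genesisTime = genesis.Truncate(time.Second)
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withGenesisTime(time.Now()))
	fmt.Println(s.genesisTime, err)
}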
@@ -5,6 +5,7 @@ import (
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -59,10 +60,8 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
return err
}

genesisTime := uint64(s.genesisTime.Unix())

// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
if err := verifyAttTargetEpoch(ctx, s.genesisTime, time.Now().Add(disparity), tgt); err != nil {
return err
}

@@ -75,7 +74,7 @@
// validate_aggregate_proof.go and validate_beacon_attestation.go

// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
if err := slots.VerifyTime(s.genesisTime, a.GetData().Slot+1, disparity); err != nil {
return err
}

@@ -88,7 +87,7 @@
if err != nil {
return err
}
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil {
return err
}

@@ -4,12 +4,12 @@ import (
"context"
"fmt"
"strconv"
"time"

"github.com/OffchainLabs/prysm/v6/async"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -139,8 +139,8 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
}

// verifyAttTargetEpoch validates that the attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
func verifyAttTargetEpoch(_ context.Context, genesis, now time.Time, c *ethpb.Checkpoint) error {
currentSlot := slots.At(genesis, now)
currentEpoch := slots.ToEpoch(currentSlot)
var prevEpoch primitives.Epoch
// Prevents previous epoch underflow

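The signature change above swaps raw Unix-second arithmetic for time.Time values. A minimal sketch of the slot computation the new form relies on, assuming slots.At divides elapsed time by the slot duration:

package main

import (
	"fmt"
	"time"
)

// slotAt is a hypothetical stand-in for slots.At: whole seconds elapsed
// since genesis divided by the seconds-per-slot constant (12 on mainnet).
func slotAt(genesis, now time.Time, secondsPerSlot uint64) uint64 {
	if now.Before(genesis) {
		return 0
	}
	return uint64(now.Sub(genesis)/time.Second) / secondsPerSlot
}

func main() {
	genesis := time.Unix(0, 0)
	fmt.Println(slotAt(genesis, genesis.Add(25*time.Second), 12)) // 2
}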
@@ -161,7 +161,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {

func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
ctx := t.Context()

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -183,7 +183,7 @@ func TestService_GetRecentPreState(t *testing.T) {

func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
ctx := t.Context()

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -353,29 +353,29 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
}

func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
}

func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Epoch: 1}))
}

func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err := verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
nowTime := time.Unix(2*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
err := verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
}

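The rewritten TestAttEpoch cases encode simple slot/epoch arithmetic. A worked sketch, with mainnet constants assumed (32 slots per epoch, 12 seconds per slot):

package main

import (
	"fmt"
	"time"
)

func main() {
	genesis := time.Unix(0, 0)
	now := genesis.Add(2 * 32 * 12 * time.Second) // exactly two epochs after genesis
	currentSlot := uint64(now.Sub(genesis)/time.Second) / 12
	currentEpoch := currentSlot / 32
	// currentSlot=64, currentEpoch=2, previous epoch=1: a target epoch of 0
	// matches neither, which is exactly what TestAttEpoch_NotMatch asserts.
	fmt.Println(currentSlot, currentEpoch)
}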
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -385,7 +385,7 @@
}

func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
@@ -402,7 +402,7 @@
}

func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)

@@ -3,10 +3,12 @@ package blockchain
import (
"context"
"fmt"
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
@@ -70,8 +72,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
defer s.saveLightClientUpdate(cfg)
defer s.saveLightClientBootstrap(cfg)
}
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
@@ -239,8 +239,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
}

if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
@@ -328,7 +329,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
// The latest block header is from the previous epoch
r, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
// The proposer indices cache takes the target root for the previous
@@ -338,12 +339,12 @@
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
}
return nil
}
@@ -561,7 +562,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
log.WithError(err).Error("Failed to wait for initial sync")
return
}

@@ -578,12 +579,12 @@
}
}

// missingIndices uses the expected commitments from the block to determine
// missingBlobIndices uses the expected commitments from the block to determine
// which BlobSidecar indices would need to be in the database for DA success.
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte, slot primitives.Slot) (map[uint64]struct{}, error) {
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(expected) == 0 {
return nil, nil
@@ -592,29 +593,238 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return nil, errMaxBlobsExceeded
}
indices := bs.Summary(root)
missing := make(map[uint64]struct{}, len(expected))
missing := make(map[uint64]bool, len(expected))
for i := range expected {
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
missing[uint64(i)] = struct{}{}
missing[uint64(i)] = true
}
}
return missing, nil
}

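The core of missingBlobIndices is a set difference between expected commitments and stored indices. A self-contained sketch with the storage summary abstracted into a lookup function; missingIdx and has are hypothetical names:

package main

import "fmt"

func missingIdx(expected [][]byte, has func(uint64) bool) map[uint64]bool {
	missing := make(map[uint64]bool, len(expected))
	for i := range expected {
		// Only indices with a non-empty commitment that storage lacks are missing.
		if len(expected[i]) > 0 && !has(uint64(i)) {
			missing[uint64(i)] = true
		}
	}
	return missing
}

func main() {
	stored := map[uint64]bool{0: true}
	m := missingIdx([][]byte{{0x01}, {0x02}}, func(i uint64) bool { return stored[i] })
	fmt.Println(m) // map[1:true] — index 1 still needs a sidecar for DA success
}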
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
// missingDataColumnIndices uses the expected data columns from the block to determine
// which DataColumnSidecar indices would need to be in the database for DA success.
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}

block := signed.Block()
numberOfColumns := params.BeaconConfig().NumberOfColumns

if uint64(len(expected)) > numberOfColumns {
return nil, errMaxDataColumnsExceeded
}

// Get a summary of the data columns stored in the database.
summary := bs.Summary(root)

// Check all expected data columns against the summary.
missing := make(map[uint64]bool)
for column := range expected {
if !summary.HasIndex(column) {
missing[column] = true
}
}

return missing, nil
}

// isDataAvailable blocks until all sidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
signedBlock interfaces.ReadOnlySignedBeaconBlock,
) error {
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}

blockVersion := block.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
}

if blockVersion >= version.Deneb {
return s.areBlobsAvailable(ctx, root, block)
}

return nil
}

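The new isDataAvailable dispatches on the block's fork version. A reduced sketch of that dispatch; the numeric constants are illustrative of the ordering (Deneb before Fulu), not the version package's actual values:

package main

import "fmt"

const (
	deneb = 4 // illustrative: the fork that introduced BlobSidecars
	fulu  = 6 // illustrative: the fork that moves DA to DataColumnSidecars
)

func daCheckKind(blockVersion int) string {
	switch {
	case blockVersion >= fulu:
		return "data-columns"
	case blockVersion >= deneb:
		return "blobs"
	default:
		return "none" // pre-Deneb blocks commit to no sidecars
	}
}

func main() {
	fmt.Println(daCheckKind(5)) // "blobs"
}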
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}

body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}

kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}

// If the block has no commitments, there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}

// All columns to sample need to be available for the block to be considered available.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()

// Prevent the custody group count from changing during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()

// Get the custody group sampling size for the node.
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}

// Subscribe to newly stored data columns in the database.
subscription, identsChan := s.dataColumnStorage.Subscribe()
defer subscription.Unsubscribe()

// Get the count of data columns we already have in the store.
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()

minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()

// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
return nil
}

// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}

// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missingMap) == 0 {
return nil
}

if s.startWaitingDataColumnSidecars != nil {
s.startWaitingDataColumnSidecars <- true
}

// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}

// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
missingMapCount := uint64(len(missingMap))

if missingMapCount == 0 {
return
}

var (
expected interface{} = "all"
missing interface{} = "all"
)

numberOfColumns := params.BeaconConfig().NumberOfColumns
colMapCount := uint64(len(peerInfo.CustodyColumns))

if colMapCount < numberOfColumns {
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
}

if missingMapCount < numberOfColumns {
missing = uint64MapToSortedSlice(missingMap)
}

log.WithFields(logrus.Fields{
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": missing,
}).Warning("Data columns still missing at slot end")
})
defer timer.Stop()
}

for {
select {
case idents := <-identsChan:
if idents.Root != root {
// This is not the root we are looking for.
continue
}

for _, index := range idents.Indices {
// This is a data column we are expecting.
if _, ok := missingMap[index]; ok {
storedDataColumnsCount++
}

// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
return nil
}

// Remove the index from the missing map.
delete(missingMap, index)

// Return if there are no more missing data columns.
if len(missingMap) == 0 {
return nil
}
}

case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missingMap))

if missingIndicesCount < numberOfColumns {
missingIndices = uint64MapToSortedSlice(missingMap)
}

return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
}
}
}

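The waiting loop above exits early on either of two conditions. A compact sketch of that predicate; the threshold semantics (enough columns of the extended matrix allow reconstructing the rest) follow the PeerDAS design, with the exact count coming from peerdas.MinimumColumnsCountToReconstruct in the diff:

package main

import "fmt"

func available(stored, minToReconstruct int, missing map[uint64]bool) bool {
	// Either everything we custody has arrived, or enough columns exist
	// to reconstruct whatever is still missing.
	return len(missing) == 0 || stored >= minToReconstruct
}

func main() {
	fmt.Println(available(64, 64, map[uint64]bool{3: true})) // true via reconstruction
	fmt.Println(available(10, 64, map[uint64]bool{}))        // true via completeness
}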
// areBlobsAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
blockSlot := block.Slot()

// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
@@ -634,9 +844,9 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
return nil
}
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
if err != nil {
return err
return errors.Wrap(err, "missing indices")
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
@@ -648,15 +858,23 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
nc := s.blobNotifiers.forRoot(root, block.Slot())

// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")

log.WithFields(logrus.Fields{
"slot": blockSlot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": len(missing),
}).Error("Still waiting for blobs DA check at slot end.")
})
defer nst.Stop()
}
@@ -678,13 +896,14 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}

func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
for idx := range input {
output = append(output, idx)
}
slices.Sort[[]uint64](output)
return output
}
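uint64MapToSortedSlice exists so the "columnsWaiting" and "columnsExpected" log fields print deterministically. A small usage sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	m := map[uint64]bool{7: true, 2: true, 5: true}
	keys := make([]uint64, 0, len(m))
	for k := range m {
		keys = append(keys, k) // map iteration order is randomized in Go
	}
	slices.Sort(keys)
	fmt.Println(keys) // [2 5 7] — stable output for logs and tests
}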

// lateBlockTasks is called 4 seconds into the slot and performs tasks
@@ -693,7 +912,7 @@ func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
@@ -714,10 +933,10 @@
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -731,7 +950,7 @@
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
@@ -770,7 +989,7 @@ func (s *Service) waitForSync() error {
}
}

func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}

@@ -33,7 +33,7 @@ import (

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
return slots.CurrentSlot(s.genesisTime)
}

// getFCUArgs returns the arguments to call forkchoice update
@@ -45,7 +45,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
@@ -68,11 +68,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -131,6 +131,12 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
}

func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
if err := s.processLightClientUpdate(cfg); err != nil {
log.WithError(err).Error("Failed to process light client update")
}
if err := s.processLightClientBootstrap(cfg); err != nil {
log.WithError(err).Error("Failed to process light client bootstrap")
}
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to process light client optimistic update")
}
@@ -139,38 +145,33 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
}
}

// saveLightClientUpdate saves the light client update for this block
// processLightClientUpdate saves the light client update for this block
// if it's better than the already saved one, when feature flag is enabled.
func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
attestedRoot := cfg.roblock.Block().ParentRoot()
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested block for root %#x", attestedRoot)
return
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
if attestedBlock == nil || attestedBlock.IsNil() {
log.Error("Saving light client update failed: Attested block is nil")
return
return errors.New("attested block is nil")
}
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested state for root %#x", attestedRoot)
return
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
if attestedState == nil || attestedState.IsNil() {
log.Error("Saving light client update failed: Attested state is nil")
return
return errors.New("attested state is nil")
}

finalizedRoot := attestedState.FinalizedCheckpoint().Root
finalizedBlock, err := s.getBlock(cfg.ctx, [32]byte(finalizedRoot))
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping saving light client update: Finalized block is nil for root %#x", finalizedRoot)
} else {
log.WithError(err).Errorf("Saving light client update failed: Could not get finalized block for root %#x", finalizedRoot)
log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
return nil
}
return
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}

update, err := lightclient.NewLightClientUpdateFromBeaconState(
@@ -183,57 +184,52 @@
finalizedBlock,
)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not create light client update")
return
return errors.Wrapf(err, "could not create light client update")
}

period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))

oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not get current light client update")
return
return errors.Wrapf(err, "could not get current light client update")
}

if oldUpdate == nil {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
log.WithError(err).Error("Saving light client update failed: Could not save light client update")
} else {
log.WithField("period", period).Debug("Saving light client update: Saved new update")
return errors.Wrapf(err, "could not save light client update")
}
return
log.WithField("period", period).Debug("Saved new light client update")
return nil
}

isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not compare light client updates")
return
return errors.Wrapf(err, "could not compare light client updates")
}

if isNewUpdateBetter {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
log.WithError(err).Error("Saving light client update failed: Could not save light client update")
} else {
log.WithField("period", period).Debug("Saving light client update: Saved new update")
return errors.Wrapf(err, "could not save light client update")
}
} else {
log.WithField("period", period).Debug("Saving light client update: New update is not better than the current one. Skipping save.")
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
return nil
}

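The refactored processLightClientUpdate implements a keep-best policy. A self-contained sketch of that policy with storage and comparison abstracted; update, maybeSave, and the comparator are hypothetical stand-ins for the DB calls and lightclient.IsBetterUpdate:

package main

import "fmt"

type update struct{ score int }

func maybeSave(stored **update, candidate *update, better func(a, b *update) bool) {
	if *stored == nil {
		*stored = candidate // nothing saved yet for this sync-committee period
		return
	}
	if better(candidate, *stored) {
		*stored = candidate
	}
	// Otherwise the candidate is not better than the current one; skip the save.
}

func main() {
	var stored *update
	better := func(a, b *update) bool { return a.score > b.score }
	maybeSave(&stored, &update{score: 1}, better)
	maybeSave(&stored, &update{score: 0}, better)
	fmt.Println(stored.score) // 1 — the worse update was skipped
}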
// saveLightClientBootstrap saves a light client bootstrap for this block
|
||||
// processLightClientBootstrap saves a light client bootstrap for this block
|
||||
// when feature flag is enabled.
|
||||
func (s *Service) saveLightClientBootstrap(cfg *postBlockProcessConfig) {
|
||||
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
|
||||
blockRoot := cfg.roblock.Root()
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Saving light client bootstrap failed: Could not create light client bootstrap")
|
||||
return
|
||||
return errors.Wrapf(err, "could not create light client bootstrap")
|
||||
}
|
||||
err = s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Saving light client bootstrap failed: Could not save light client bootstrap in DB")
|
||||
if err := s.lcStore.SaveLightClientBootstrap(cfg.ctx, blockRoot, bootstrap); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client bootstrap")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) processLightClientFinalityUpdate(
|
||||
@@ -309,6 +305,11 @@ func (s *Service) processLightClientFinalityUpdate(
|
||||
Type: statefeed.LightClientFinalityUpdate,
|
||||
Data: newUpdate,
|
||||
})
|
||||
|
||||
if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast light client finality update")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -358,6 +359,10 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
|
||||
Data: newUpdate,
|
||||
})
|
||||
|
||||
if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast light client optimistic update")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
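
Both hunks add the same step: after notifying local subscribers through the state feed, the update is gossiped over P2P, and only the broadcast can fail the call. A rough sketch of that notify-then-broadcast shape, with hypothetical `feed` and `p2p` stand-ins for the real state feed and broadcaster:

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

// feed stands in for the in-process event feed: delivery is best-effort
// and cannot fail the caller.
type feed struct{}

func (f *feed) Send(v any) { fmt.Println("feed event:", v) }

// p2p stands in for the gossip broadcaster: this step can fail.
type p2p struct{}

func (p *p2p) Broadcast(ctx context.Context, v any) error { return nil }

func publish(ctx context.Context, f *feed, net *p2p, update string) error {
	f.Send(update)
	if err := net.Broadcast(ctx, update); err != nil {
		return errors.Wrap(err, "could not broadcast light client update")
	}
	return nil
}

func main() {
	_ = publish(context.Background(), &feed{}, &p2p{}, "finality-update")
}
```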

@@ -427,7 +432,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
	}

	// Verify block slot time is not from the future.
-	if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
+	if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
		return nil, err
	}
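
The call-site change reflects a new `slots.VerifyTime` signature that takes the genesis `time.Time` directly instead of Unix seconds, dropping a lossy conversion at every caller. A simplified stand-in for such a check, assuming 12-second slots; this is an illustration, not Prysm's implementation:

```go
package main

import (
	"fmt"
	"time"
)

// verifyTime rejects slots whose start time lies beyond now plus the
// allowed clock disparity. Taking genesis as time.Time keeps sub-second
// precision that a uint64 of Unix seconds would discard.
func verifyTime(genesis time.Time, slot uint64, disparity time.Duration) error {
	const secondsPerSlot = 12 // assumption
	slotStart := genesis.Add(time.Duration(slot) * secondsPerSlot * time.Second)
	if slotStart.After(time.Now().Add(disparity)) {
		return fmt.Errorf("slot %d is in the future", slot)
	}
	return nil
}

func main() {
	genesis := time.Now().Add(-10 * 12 * time.Second) // 10 slots ago
	fmt.Println(verifyTime(genesis, 5, 500*time.Millisecond))  // <nil>
	fmt.Println(verifyTime(genesis, 50, 500*time.Millisecond)) // error
}
```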

@@ -522,7 +527,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
	// is meant to be asynchronous and run in the background rather than being
	// tied to the execution of a block.
	if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
-		log.WithError(err).Error("could not migrate to cold")
+		log.WithError(err).Error("Could not migrate to cold")
	}
	}()
	return nil
@@ -611,7 +616,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	// Update deposit cache.
	finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
	if err != nil {
-		log.WithError(err).Error("could not fetch finalized state")
+		log.WithError(err).Error("Could not fetch finalized state")
		return
	}

@@ -629,7 +634,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
	eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
	if err != nil {
-		log.WithError(err).Error("could not cast eth1 deposit index")
+		log.WithError(err).Error("Could not cast eth1 deposit index")
		return
	}
	// The deposit index in the state is always the index of the next deposit
@@ -638,12 +643,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	finalizedEth1DepIdx := eth1DepositIndex - 1
	if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
		0 /* Setting a zero value as we have no access to block height */); err != nil {
-		log.WithError(err).Error("could not insert finalized deposits")
+		log.WithError(err).Error("Could not insert finalized deposits")
		return
	}
	// Deposit proofs are only used during state transition and can be safely removed to save space.
	if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
-		log.WithError(err).Error("could not prune deposit proofs")
+		log.WithError(err).Error("Could not prune deposit proofs")
	}
	// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
	// to the provided eth1 deposit index.

@@ -13,6 +13,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
@@ -24,6 +25,7 @@ import (
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
+	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -33,6 +35,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+	"github.com/OffchainLabs/prysm/v6/genesis"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -49,7 +52,7 @@ import (
)

func Test_pruneAttsFromPool_Electra(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
	logHook := logTest.NewGlobal()

	params.SetupTestConfigCleanup(t)
@@ -57,11 +60,8 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
	cfg.TargetCommitteeSize = 8
	params.OverrideBeaconConfig(cfg)

-	s := Service{
-		cfg: &config{
-			AttPool: kv.NewAttCaches(),
-		},
-	}
+	s := testServiceNoDB(t)
+	s.cfg.AttPool = kv.NewAttCaches()

	data := &ethpb.AttestationData{
		BeaconBlockRoot: make([]byte, 32),
@@ -239,7 +239,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
	fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.NoError(t, err)

	// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -282,7 +282,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.NoError(t, err)

	// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -292,7 +292,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
	wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
	for i, rt := range wantedRoots {
		assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
-		assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
+		assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(t.Context(), bytesutil.ToBytes32(rt)))
	}
}
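
These call sites move from `context.Background()` to `t.Context()`, added in Go 1.24: the returned context is canceled just before the test's cleanup functions run, so goroutines and DB calls tied to it cannot outlive the test. A self-contained example of the idiom:

```go
package blockchain

import (
	"context"
	"testing"
	"time"
)

// TestWithPerTestContext shows the t.Context() idiom used throughout the
// diff above; the test name and body are illustrative only.
func TestWithPerTestContext(t *testing.T) {
	ctx := t.Context()

	// Derived contexts behave as usual and inherit the per-test cancelation.
	sub, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()

	select {
	case <-sub.Done():
		t.Fatalf("context canceled too early: %v", sub.Err())
	default:
		// ctx stays live for the duration of the test body.
	}
}
```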

@@ -338,7 +338,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
	// Set finalized epoch to 2.
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.NoError(t, err)

	// There should be 1 node: block 65
@@ -371,7 +371,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
	require.NoError(t, err)

	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

@@ -449,33 +449,34 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
		beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
		wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock)
		require.NoError(t, err)
-		if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil {
+		if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil {
			return nil, err
		}
-		if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
+		if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
			return nil, errors.Wrap(err, "could not save state")
		}
	}
-	if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil {
+	if err := beaconDB.SaveState(t.Context(), st.Copy(), r1); err != nil {
		return nil, err
	}
-	if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil {
+	if err := beaconDB.SaveState(t.Context(), st.Copy(), r7); err != nil {
		return nil, err
	}
-	if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil {
+	if err := beaconDB.SaveState(t.Context(), st.Copy(), r8); err != nil {
		return nil, err
	}
	return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}

func TestCurrentSlot_HandlesOverflow(t *testing.T) {
-	svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
+	svc := testServiceNoDB(t)
+	svc.genesisTime = prysmTime.Now().Add(1 * time.Hour)

	slot := svc.CurrentSlot()
	require.Equal(t, primitives.Slot(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
	opts := testServiceOptsWithDB(t)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
@@ -508,18 +509,18 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
		beaconBlock := util.NewBeaconBlock()
		beaconBlock.Block.Slot = b.Block.Slot
		beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
-		util.SaveBlock(t, context.Background(), beaconDB, beaconBlock)
+		util.SaveBlock(t, t.Context(), beaconDB, beaconBlock)
	}

	// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
-	r, err := service.Ancestor(context.Background(), r200[:], 150)
+	r, err := service.Ancestor(t.Context(), r200[:], 150)
	require.NoError(t, err)
	if bytesutil.ToBytes32(r) != r100 {
		t.Error("Did not get correct root")
	}

	// Slots 1 to 100 are skip slots. Requesting root at 50 will yield root at 1. The last physical block.
-	r, err = service.Ancestor(context.Background(), r200[:], 50)
+	r, err = service.Ancestor(t.Context(), r200[:], 50)
	require.NoError(t, err)
	if bytesutil.ToBytes32(r) != r1 {
		t.Error("Did not get correct root")
@@ -527,7 +528,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
}

func TestAncestor_CanUseForkchoice(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
	opts := testServiceOptsWithDB(t)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
@@ -555,12 +556,12 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
		beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
		r, err := b.Block.HashTreeRoot()
		require.NoError(t, err)
-		st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
+		st, blkRoot, err := prepareForkchoiceState(t.Context(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
	}

-	r, err := service.Ancestor(context.Background(), r200[:], 150)
+	r, err := service.Ancestor(t.Context(), r200[:], 150)
	require.NoError(t, err)
	if bytesutil.ToBytes32(r) != r100 {
		t.Error("Did not get correct root")
@@ -592,14 +593,14 @@ func TestAncestor_CanUseDB(t *testing.T) {
		beaconBlock := util.NewBeaconBlock()
		beaconBlock.Block.Slot = b.Block.Slot
		beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
-		util.SaveBlock(t, context.Background(), beaconDB, beaconBlock)
+		util.SaveBlock(t, t.Context(), beaconDB, beaconBlock)
	}

-	st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
+	st, blkRoot, err := prepareForkchoiceState(t.Context(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

-	r, err := service.Ancestor(context.Background(), r200[:], 150)
+	r, err := service.Ancestor(t.Context(), r200[:], 150)
	require.NoError(t, err)
	if bytesutil.ToBytes32(r) != r100 {
		t.Error("Did not get correct root")
@@ -607,7 +608,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
}

func TestEnsureRootNotZeroHashes(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
	opts := testServiceOptsNoDB()
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
@@ -621,7 +622,7 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
}

func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
	opts := testServiceOptsNoDB()
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
@@ -920,7 +921,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
	r, err := b.Block.HashTreeRoot()
	require.NoError(t, err)

-	ctx := context.Background()
+	ctx := t.Context()
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
@@ -933,7 +934,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
	require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
-	service.pruneAttsFromPool(context.Background(), nil /* state not needed pre-Electra */, wsb)
+	service.pruneAttsFromPool(t.Context(), nil /* state not needed pre-Electra */, wsb)
	require.LogsDoNotContain(t, logHook, "Could not prune attestations")
	require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -1357,7 +1358,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	for i := 1; i < 6; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1378,7 +1379,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	}

	for i := 6; i < 12; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1399,7 +1400,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	}

	for i := 12; i < 18; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1548,7 +1549,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	for i := 1; i < 6; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1568,7 +1569,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	}

	for i := 6; i < 12; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1589,7 +1590,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	}

	for i := 12; i < 18; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1740,7 +1741,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	for i := 1; i < 6; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1761,7 +1762,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	}

	for i := 6; i < 12; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1812,7 +1813,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	// import blocks 13 through 18 to justify 12
	invalidRoots := make([][32]byte, 19-13)
	for i := 13; i < 19; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -1907,7 +1908,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	require.NoError(t, err)
	// Import blocks 21--30 (Epoch 3 was not enough to justify 2)
	for i := 21; i < 30; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
		require.NoError(t, err)
@@ -1980,20 +1981,22 @@ func TestNoViableHead_Reboot(t *testing.T) {
	genesisState, keys := util.DeterministicGenesisState(t, 64)
	stateRoot, err := genesisState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")
-	genesis := blocks.NewGenesisBlock(stateRoot[:])
-	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
+	gb := blocks.NewGenesisBlock(stateRoot[:])
+	wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
	require.NoError(t, err)
-	genesisRoot, err := genesis.Block.HashTreeRoot()
+	genesisRoot, err := gb.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
	require.NoError(t, service.saveGenesisData(ctx, genesisState))

+	genesis.StoreStateDuringTest(t, genesisState)
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
	require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")

	for i := 1; i < 6; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		t.Log(i)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -2013,7 +2016,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
	}

	for i := 6; i < 12; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -2062,7 +2065,7 @@ func TestNoViableHead_Reboot(t *testing.T) {

	// import blocks 13 through 18 to justify 12
	for i := 13; i < 19; i++ {
-		driftGenesisTime(service, int64(i), 0)
+		driftGenesisTime(service, primitives.Slot(i), 0)
		st, err := service.HeadState(ctx)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -2305,6 +2308,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
	logHook := logTest.NewGlobal()
	service, tr := minimalTestService(t)
+	service.SetGenesisTime(time.Now())
	service.lateBlockTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
@@ -2317,26 +2321,29 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
	defer resetCfg()

	service, tr := minimalTestService(t)
+	service.SetGenesisTime(time.Now())
+	service.SetForkChoiceGenesisTime(time.Now())
	service.lateBlockTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}

// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
-func driftGenesisTime(s *Service, slot, delay int64) {
-	offset := slot*int64(params.BeaconConfig().SecondsPerSlot) + delay
-	newTime := time.Unix(time.Now().Unix()-offset, 0)
-	s.SetGenesisTime(newTime)
-	s.cfg.ForkChoiceStore.SetGenesisTime(uint64(newTime.Unix()))
+func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
+	now := time.Now()
+	slotDuration := time.Duration(slot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
+	genesis := now.Add(-slotDuration - delay)
+	s.SetGenesisTime(genesis)
+	s.cfg.ForkChoiceStore.SetGenesisTime(genesis)
}
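
For callers, the rewritten helper means passing a typed slot and a `time.Duration` delay instead of two `int64`s, with sub-second delays now expressible. A standalone mirror of the computation, with a 12-second slot time assumed in place of `params.BeaconConfig().SecondsPerSlot`:

```go
package main

import (
	"fmt"
	"time"
)

// driftedGenesis reproduces the arithmetic of the rewritten helper above,
// outside the Service type: genesis is pushed back so that "now" falls
// `delay` into the target slot. The 12-second slot time is an assumption.
func driftedGenesis(slot uint64, delay time.Duration) time.Time {
	const secondsPerSlot = 12
	slotDuration := time.Duration(slot) * secondsPerSlot * time.Second
	return time.Now().Add(-slotDuration - delay)
}

func main() {
	genesis := driftedGenesis(5, 2*time.Second) // 2s late into slot 5
	elapsed := time.Since(genesis)
	fmt.Printf("current slot ≈ %d\n", int(elapsed.Seconds())/12) // 5
}
```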

-func TestMissingIndices(t *testing.T) {
+func TestMissingBlobIndices(t *testing.T) {
	cases := []struct {
		name     string
		expected [][]byte
		present  []uint64
		result   map[uint64]struct{}
-		root     [32]byte
+		root     [fieldparams.RootLength]byte
		err      error
	}{
		{
@@ -2394,7 +2401,7 @@ func TestMissingIndices(t *testing.T) {
		bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
		t.Run(c.name, func(t *testing.T) {
			require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
-			missing, err := missingIndices(bs, c.root, c.expected, 0)
+			missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
@@ -2402,9 +2409,70 @@ func TestMissingIndices(t *testing.T) {
			require.NoError(t, err)
			require.Equal(t, len(c.result), len(missing))
			for key := range c.result {
-				m, ok := missing[key]
-				require.Equal(t, true, ok)
-				require.Equal(t, c.result[key], m)
+				require.Equal(t, true, missing[key])
			}
		})
	}
}

+func TestMissingDataColumnIndices(t *testing.T) {
+	countPlusOne := params.BeaconConfig().NumberOfColumns + 1
+	tooManyColumns := make(map[uint64]bool, countPlusOne)
+	for i := range countPlusOne {
+		tooManyColumns[uint64(i)] = true
+	}
+
+	testCases := []struct {
+		name          string
+		storedIndices []uint64
+		input         map[uint64]bool
+		expected      map[uint64]bool
+		err           error
+	}{
+		{
+			name:  "zero len expected",
+			input: map[uint64]bool{},
+		},
+		{
+			name:  "expected exceeds max",
+			input: tooManyColumns,
+			err:   errMaxDataColumnsExceeded,
+		},
+		{
+			name:          "all missing",
+			storedIndices: []uint64{},
+			input:         map[uint64]bool{0: true, 1: true, 2: true},
+			expected:      map[uint64]bool{0: true, 1: true, 2: true},
+		},
+		{
+			name:          "none missing",
+			input:         map[uint64]bool{0: true, 1: true, 2: true},
+			expected:      map[uint64]bool{},
+			storedIndices: []uint64{0, 1, 2, 3, 4}, // Extra columns stored but not expected
+		},
+		{
+			name:          "some missing",
+			storedIndices: []uint64{0, 20},
+			input:         map[uint64]bool{0: true, 10: true, 20: true, 30: true},
+			expected:      map[uint64]bool{10: true, 30: true},
+		},
+	}
+
+	var emptyRoot [fieldparams.RootLength]byte
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			dcm, dcs := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
+			err := dcm.CreateFakeIndices(emptyRoot, 0, tc.storedIndices...)
+			require.NoError(t, err)
+
+			// Test the function
+			actual, err := missingDataColumnIndices(dcs, emptyRoot, tc.input)
+			require.ErrorIs(t, err, tc.err)
+
+			require.Equal(t, len(tc.expected), len(actual))
+			for key := range tc.expected {
+				require.Equal(t, true, actual[key])
+			}
+		})
+	}
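
The table drives a set difference over expected versus stored column indices, with an error when more columns are expected than can exist. A rough standalone sketch of such a helper; the 128-column cap and error value are assumptions mirroring `NumberOfColumns` and `errMaxDataColumnsExceeded`, not the real implementation:

```go
package main

import (
	"errors"
	"fmt"
)

var errMaxExceeded = errors.New("too many expected data columns")

// missingIndices reports every expected index that is not present in
// storage, the behavior the test cases above exercise.
func missingIndices(stored []uint64, expected map[uint64]bool) (map[uint64]bool, error) {
	const numberOfColumns = 128 // assumption
	if uint64(len(expected)) > numberOfColumns {
		return nil, errMaxExceeded
	}
	have := make(map[uint64]bool, len(stored))
	for _, idx := range stored {
		have[idx] = true
	}
	missing := make(map[uint64]bool)
	for idx := range expected {
		if !have[idx] {
			missing[idx] = true
		}
	}
	return missing, nil
}

func main() {
	// Mirrors the "some missing" case: stored {0, 20}, expected {0, 10, 20, 30}.
	m, err := missingIndices([]uint64{0, 20}, map[uint64]bool{0: true, 10: true, 20: true, 30: true})
	fmt.Println(m, err) // map[10:true 30:true] <nil>
}
```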

@@ -2604,7 +2672,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
	require.Equal(t, true, hasState)

	// Set deadlined context when processing the block
-	cancCtx, canc := context.WithCancel(context.Background())
+	cancCtx, canc := context.WithCancel(t.Context())
	canc()
	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
	require.NoError(t, err)
@@ -2643,7 +2711,7 @@ func fakeResult(missing []uint64) map[uint64]struct{} {
	return r
}

-func TestSaveLightClientUpdate(t *testing.T) {
+func TestProcessLightClientUpdate(t *testing.T) {
	featCfg := &features.Flags{}
	featCfg.EnableLightClient = true
	reset := features.InitWithReset(featCfg)
@@ -2684,7 +2752,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
		isValidPayload: true,
	}

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	// Check that the light client update is saved
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
@@ -2733,13 +2801,13 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -2785,7 +2853,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	scb := make([]byte, 64)
@@ -2800,7 +2868,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -2843,7 +2911,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
		isValidPayload: true,
	}

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	// Check that the light client update is saved
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
@@ -2891,13 +2959,13 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -2943,7 +3011,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	scb := make([]byte, 64)
@@ -2958,7 +3026,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -3001,7 +3069,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
		isValidPayload: true,
	}

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	// Check that the light client update is saved
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
@@ -3049,13 +3117,13 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -3101,7 +3169,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

	// create and save old update
-	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+	oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
	require.NoError(t, err)

	scb := make([]byte, 64)
@@ -3116,7 +3184,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
	err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
	require.NoError(t, err)

-	s.saveLightClientUpdate(cfg)
+	require.NoError(t, s.processLightClientUpdate(cfg))

	u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
	require.NoError(t, err)
@@ -3129,120 +3197,280 @@ func TestSaveLightClientUpdate(t *testing.T) {
	reset()
}

-func TestSaveLightClientBootstrap(t *testing.T) {
+func TestProcessLightClientBootstrap(t *testing.T) {
	featCfg := &features.Flags{}
	featCfg.EnableLightClient = true
	reset := features.InitWithReset(featCfg)
	defer reset()

-	s, tr := minimalTestService(t)
+	s, tr := minimalTestService(t, WithLCStore())
	ctx := tr.ctx

-	t.Run("Altair", func(t *testing.T) {
-		l := util.NewTestLightClient(t, version.Altair)
+	for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
+		t.Run(version.String(testVersion), func(t *testing.T) {
+			l := util.NewTestLightClient(t, testVersion)

-		s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
+			s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().VersionToForkEpochMap()[testVersion])*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

-		currentBlockRoot, err := l.Block.Block().HashTreeRoot()
-		require.NoError(t, err)
-		roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
-		require.NoError(t, err)
+			currentBlockRoot, err := l.Block.Block().HashTreeRoot()
+			require.NoError(t, err)
+			roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
+			require.NoError(t, err)

-		err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
-		require.NoError(t, err)
-		err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
-		require.NoError(t, err)
+			err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
+			require.NoError(t, err)
+			err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
+			require.NoError(t, err)

-		cfg := &postBlockProcessConfig{
-			ctx:            ctx,
-			roblock:        roblock,
-			postState:      l.State,
-			isValidPayload: true,
+			cfg := &postBlockProcessConfig{
+				ctx:            ctx,
+				roblock:        roblock,
+				postState:      l.State,
+				isValidPayload: true,
+			}

+			require.NoError(t, s.processLightClientBootstrap(cfg))

+			// Check that the light client bootstrap is saved
+			b, err := s.lcStore.LightClientBootstrap(ctx, currentBlockRoot)
+			require.NoError(t, err)
+			require.NotNil(t, b)

+			stateRoot, err := l.State.HashTreeRoot(ctx)
+			require.NoError(t, err)
+			require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
+			require.Equal(t, b.Version(), testVersion)
+		})
+	}
}
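
Each iteration rewinds `s.genesisTime` far enough that the wall clock lands at the fork epoch under test, which is what makes the per-version subtests produce the right object version. The arithmetic, extracted into a standalone sketch with assumed (non-mainnet) epoch and slot parameters:

```go
package main

import (
	"fmt"
	"time"
)

// genesisForForkEpoch returns a genesis time such that the current
// wall-clock moment sits exactly at the start of forkEpoch. The parameter
// values used in main are illustrative assumptions.
func genesisForForkEpoch(forkEpoch, slotsPerEpoch, secondsPerSlot int64) time.Time {
	offset := forkEpoch * slotsPerEpoch * secondsPerSlot
	return time.Unix(time.Now().Unix()-offset, 0)
}

func main() {
	genesis := genesisForForkEpoch(5 /*fork epoch*/, 8, 12)
	epochsElapsed := int(time.Since(genesis).Seconds()) / (8 * 12)
	fmt.Println(epochsElapsed) // 5
}
```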

+type testIsAvailableParams struct {
+	options                 []Option
+	blobKzgCommitmentsCount uint64
+	columnsToSave           []uint64
+}

+func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
+	ctx, cancel := context.WithCancel(t.Context())
+	dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

+	options := append(params.options, WithDataColumnStorage(dataColumnStorage))
+	service, _ := minimalTestService(t, options...)

+	genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)

+	err := service.saveGenesisData(ctx, genesisState)
+	require.NoError(t, err)

+	conf := util.DefaultBlockGenConfig()
+	conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount

+	signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
+	require.NoError(t, err)

+	block := signedBeaconBlock.Block
+	bodyRoot, err := block.Body.HashTreeRoot()
+	require.NoError(t, err)

+	root, err := block.HashTreeRoot()
+	require.NoError(t, err)

+	dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
+	for _, i := range params.columnsToSave {
+		dataColumnParam := util.DataColumnParam{
+			Index:         i,
+			Slot:          block.Slot,
+			ProposerIndex: block.ProposerIndex,
+			ParentRoot:    block.ParentRoot,
+			StateRoot:     block.StateRoot,
+			BodyRoot:      bodyRoot[:],
+		}
+		dataColumnsParams = append(dataColumnsParams, dataColumnParam)
+	}

-		s.saveLightClientBootstrap(cfg)
+	_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

-		// Check that the light client bootstrap is saved
-		b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
+	err = dataColumnStorage.Save(verifiedRODataColumns)
	require.NoError(t, err)

+	signed, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlock)
+	require.NoError(t, err)

+	return ctx, cancel, service, root, signed
+}

+func TestIsDataAvailable(t *testing.T) {
+	t.Run("Fulu - out of retention window", func(t *testing.T) {
+		params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
+		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

+		err := service.isDataAvailable(ctx, root, signed)
		require.NoError(t, err)
-		require.NotNil(t, b)

-		stateRoot, err := l.State.HashTreeRoot(ctx)
-		require.NoError(t, err)
-		require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
-		require.Equal(t, b.Version(), version.Altair)
	})

-	t.Run("Capella", func(t *testing.T) {
-		l := util.NewTestLightClient(t, version.Capella)
+	t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
+		ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})

-		s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

-		currentBlockRoot, err := l.Block.Block().HashTreeRoot()
+		err := service.isDataAvailable(ctx, root, signed)
		require.NoError(t, err)
-		roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
-		require.NoError(t, err)

-		err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
-		require.NoError(t, err)
-		err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
-		require.NoError(t, err)

-		cfg := &postBlockProcessConfig{
-			ctx:            ctx,
-			roblock:        roblock,
-			postState:      l.State,
-			isValidPayload: true,
-		}

-		s.saveLightClientBootstrap(cfg)

-		// Check that the light client bootstrap is saved
-		b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
-		require.NoError(t, err)
-		require.NotNil(t, b)

-		stateRoot, err := l.State.HashTreeRoot(ctx)
-		require.NoError(t, err)
-		require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
-		require.Equal(t, b.Version(), version.Capella)
	})

-	t.Run("Deneb", func(t *testing.T) {
-		l := util.NewTestLightClient(t, version.Deneb)

-		s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

-		currentBlockRoot, err := l.Block.Block().HashTreeRoot()
-		require.NoError(t, err)
-		roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
-		require.NoError(t, err)

-		err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
-		require.NoError(t, err)
-		err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
-		require.NoError(t, err)

-		cfg := &postBlockProcessConfig{
-			ctx:            ctx,
-			roblock:        roblock,
-			postState:      l.State,
-			isValidPayload: true,
+	t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
+		minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+		indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
+		for i := range minimumColumnsCountToReconstruct {
+			indices = append(indices, i)
		}

-		s.saveLightClientBootstrap(cfg)
+		params := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
+			columnsToSave:           indices,
+			blobKzgCommitmentsCount: 3,
+		}

-		// Check that the light client bootstrap is saved
-		b, err := s.cfg.BeaconDB.LightClientBootstrap(ctx, currentBlockRoot[:])
-		require.NoError(t, err)
-		require.NotNil(t, b)
+		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		stateRoot, err := l.State.HashTreeRoot(ctx)
+		err := service.isDataAvailable(ctx, root, signed)
		require.NoError(t, err)
-		require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
-		require.Equal(t, b.Version(), version.Deneb)
	})

-	reset()
+	t.Run("Fulu - no missing data columns", func(t *testing.T) {
+		params := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
+			columnsToSave:           []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
+			blobKzgCommitmentsCount: 3,
+		}

+		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

+		err := service.isDataAvailable(ctx, root, signed)
+		require.NoError(t, err)
+	})

+	t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
+		startWaiting := make(chan bool)

+		testParams := testIsAvailableParams{
+			options:       []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
+			columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing

+			blobKzgCommitmentsCount: 3,
+		}

+		ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
+		block := signed.Block()
+		slot := block.Slot()
+		proposerIndex := block.ProposerIndex()
+		parentRoot := block.ParentRoot()
+		stateRoot := block.StateRoot()
+		bodyRoot, err := block.Body().HashTreeRoot()
+		require.NoError(t, err)

+		_, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(
+			t,
+			[]util.DataColumnParam{
+				{Index: 42, Slot: slot + 1}, // Needed index, but not for this slot.
+			})

+		_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
+			{Index: 87, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
+			{Index: 1, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]},  // Not needed index
+			{Index: 42, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
+		})

+		go func() {
+			<-startWaiting

+			err := service.dataColumnStorage.Save(verifiedSidecarsWrongRoot)
+			require.NoError(t, err)

+			err = service.dataColumnStorage.Save(verifiedSidecars)
+			require.NoError(t, err)
+		}()

+		err = service.isDataAvailable(ctx, root, signed)
+		require.NoError(t, err)
+	})

+	t.Run("Fulu - some initially missing data columns (reconstruction)", func(t *testing.T) {
+		const (
+			missingColumns = uint64(2)
+			cgc            = 128
+		)

+		startWaiting := make(chan bool)

+		var custodyInfo peerdas.CustodyInfo
+		custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
+		custodyInfo.ToAdvertiseGroupCount.Set(cgc)

+		minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+		indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)

+		for i := range minimumColumnsCountToReconstruct - missingColumns {
+			indices = append(indices, i)
+		}

+		testParams := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
+			columnsToSave:           indices,
+			blobKzgCommitmentsCount: 3,
+		}

+		ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
+		block := signed.Block()
+		slot := block.Slot()
+		proposerIndex := block.ProposerIndex()
+		parentRoot := block.ParentRoot()
+		stateRoot := block.StateRoot()
+		bodyRoot, err := block.Body().HashTreeRoot()
+		require.NoError(t, err)

+		dataColumnParams := make([]util.DataColumnParam, 0, missingColumns)
+		for i := minimumColumnsCountToReconstruct - missingColumns; i < minimumColumnsCountToReconstruct; i++ {
+			dataColumnParam := util.DataColumnParam{
+				Index:         i,
+				Slot:          slot,
+				ProposerIndex: proposerIndex,
+				ParentRoot:    parentRoot[:],
+				StateRoot:     stateRoot[:],
+				BodyRoot:      bodyRoot[:],
+			}

+			dataColumnParams = append(dataColumnParams, dataColumnParam)
+		}

+		_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParams)

+		go func() {
+			<-startWaiting

+			err := service.dataColumnStorage.Save(verifiedSidecars)
+			require.NoError(t, err)
+		}()

+		err = service.isDataAvailable(ctx, root, signed)
+		require.NoError(t, err)
+	})

+	t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
+		startWaiting := make(chan bool)

+		params := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
+			blobKzgCommitmentsCount: 3,
+		}

+		ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)

+		go func() {
+			<-startWaiting
+			cancel()
+		}()

+		err := service.isDataAvailable(ctx, root, signed)
+		require.NotNil(t, err)
+	})
}
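
The custody subtests lean on the PeerDAS property that, with a rate-1/2 erasure code over the extended columns, holding at least half of them is enough to reconstruct the rest, which is what `peerdas.MinimumColumnsCountToReconstruct` gates above. A sketch under that assumption; the 128-column constant stands in for `NumberOfColumns` and is not read from any config:

```go
package main

import "fmt"

// numberOfColumns mirrors the extended-column count assumed by the
// subtests above; 128 is an assumption for illustration.
const numberOfColumns = 128

// minimumColumnsToReconstruct reflects the rate-1/2 coding assumption:
// any half of the extended columns determines the other half.
func minimumColumnsToReconstruct() int { return numberOfColumns / 2 }

func canReconstruct(custody map[uint64]bool) bool {
	return len(custody) >= minimumColumnsToReconstruct()
}

func main() {
	custody := make(map[uint64]bool)
	for i := uint64(0); i < 64; i++ {
		custody[i] = true
	}
	fmt.Println(canReconstruct(custody)) // true
}
```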

func setupLightClientTestRequirements(ctx context.Context, t *testing.T, s *Service, v int, options ...util.LightClientOption) (*util.TestLightClient, *postBlockProcessConfig) {
@@ -3309,6 +3537,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
	params.OverrideBeaconConfig(beaconCfg)

	s, tr := minimalTestService(t)
+	s.cfg.P2P = &mockp2p.FakeP2P{}
	ctx := tr.ctx

	testCases := []struct {
@@ -3354,7 +3583,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
		expectedVersion = version.Altair
	case 2:
		forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
-		expectedVersion = version.Altair
+		expectedVersion = version.Bellatrix
	case 3:
		forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
		expectedVersion = version.Capella
@@ -3363,7 +3592,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
		expectedVersion = version.Deneb
	case 5:
		forkEpoch = uint64(params.BeaconConfig().ElectraForkEpoch)
-		expectedVersion = version.Deneb
+		expectedVersion = version.Electra
	default:
		t.Errorf("Unsupported fork version %s", version.String(testVersion))
	}
@@ -3444,6 +3673,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
	params.OverrideBeaconConfig(beaconCfg)

	s, tr := minimalTestService(t)
+	s.cfg.P2P = &mockp2p.FakeP2P{}
	ctx := tr.ctx

	testCases := []struct {
@@ -3507,7 +3737,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
		expectedVersion = version.Altair
	case 2:
		forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
-		expectedVersion = version.Altair
+		expectedVersion = version.Bellatrix
	case 3:
		forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
		expectedVersion = version.Capella

@@ -43,7 +43,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
	if err != nil {
		return nil, err
	}
-	if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
+	if err := slots.ValidateClock(ss, s.genesisTime); err != nil {
		return nil, err
	}
	// We acquire the lock here instead than on gettAttPreState because that function gets called from UpdateHead that holds a write lock
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
	go func() {
		_, err := s.clockWaiter.WaitForClock(s.ctx)
		if err != nil {
-			log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
+			log.WithError(err).Error("Failed to receive genesis data")
			return
		}
		if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
		} else {
			s.cfg.ForkChoiceStore.Lock()
			if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
-				log.WithError(err).Error("could not process new slot")
+				log.WithError(err).Error("Could not process new slot")
			}
			s.cfg.ForkChoiceStore.Unlock()

@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
	log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
	if err != nil {
-		log.WithError(err).Error("could not get head block")
+		log.WithError(err).Error("Could not get head block")
		return
	}
	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
		return
	}
	if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
-		log.WithError(err).Error("could not update forkchoice")
+		log.WithError(err).Error("Could not update forkchoice")
	}
}

@@ -179,7 +179,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
	// This delays consideration in the fork choice until their slot is in the past.
	// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
	nextSlot := a.GetData().Slot + 1
-	if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
+	if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
		continue
	}


@@ -1,7 +1,6 @@
package blockchain

import (
-	"context"
	"testing"
	"time"

@@ -32,7 +31,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
	service.genesisTime = time.Now()

	e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
-	_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
+	_, err := service.AttestationTargetState(t.Context(), &ethpb.Checkpoint{Epoch: e})
	require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}

@@ -56,11 +55,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
	a.Data.Target.Root = []byte{'c'}
	r33Root := r33.Root()
	a.Data.BeaconBlockRoot = r33Root[:]
-	require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
+	require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(t.Context(), a))

	r32Root := r32.Root()
	a.Data.Target.Root = r32Root[:]
-	err = service.VerifyLmdFfgConsistency(context.Background(), a)
+	err = service.VerifyLmdFfgConsistency(t.Context(), a)
	require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}

@@ -71,7 +70,7 @@ func TestProcessAttestations_Ok(t *testing.T) {

	service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
	genesisState, pks := util.DeterministicGenesisState(t, 64)
-	require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
+	require.NoError(t, genesisState.SetGenesisTime(time.Now().Add(-1*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)))
	require.NoError(t, service.saveGenesisData(ctx, genesisState))
	atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
	require.NoError(t, err)

@@ -16,6 +16,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
+	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -52,6 +53,13 @@ type BlobReceiver interface {
	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

+// DataColumnReceiver interface defines the methods of chain service for receiving new
+// data columns
+type DataColumnReceiver interface {
+	ReceiveDataColumn(blocks.VerifiedRODataColumn) error
+	ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
+}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -74,6 +82,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
return nil
|
||||
}
|
||||
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
@@ -82,6 +91,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
@@ -97,10 +107,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
|
||||
@@ -171,7 +183,7 @@ func (s *Service) updateCheckpoints(
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||
log.WithError(err).Error("could not report epoch metrics")
|
||||
log.WithError(err).Error("Could not report epoch metrics")
|
||||
}
|
||||
}
|
||||
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
|
||||
@@ -227,26 +239,34 @@ func (s *Service) validateExecutionAndConsensus(
func (s *Service) handleDA(
	ctx context.Context,
	block interfaces.SignedBeaconBlock,
	blockRoot [32]byte,
	blockRoot [fieldparams.RootLength]byte,
	avs das.AvailabilityStore,
) (time.Duration, error) {
	daStartTime := time.Now()
	if avs != nil {
		rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
		if err != nil {
			return 0, err
		}
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
		}
	} else {
		if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability")
		}
	}
	daWaitedTime := time.Since(daStartTime)
	dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
	return daWaitedTime, nil
}
) (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)

		if err == nil {
			dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
		}
	}(time.Now())

	if avs == nil {
		if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
			return
		}

		return
	}

	var rob blocks.ROBlock
	rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
	if err != nil {
		return
	}

	err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)

	return
}
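
The rewritten handleDA above measures every exit path with one deferred closure over named return values, instead of computing the elapsed time only on the single success path. A standalone sketch of that pattern; the names here are illustrative, not Prysm's:

package main

import (
	"fmt"
	"time"
)

// timedWork mirrors the named-return-plus-defer pattern handleDA adopts: the
// deferred closure assigns elapsed after the body finishes, so every return
// path is measured without repeating time.Since at each return statement.
func timedWork(work func() error) (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)
	}(time.Now())
	err = work()
	return
}

func main() {
	elapsed, err := timedWork(func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	})
	fmt.Println(elapsed, err) // roughly 10ms, <nil>
}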

func (s *Service) reportPostBlockProcessing(
@@ -263,7 +283,7 @@ func (s *Service) reportPostBlockProcessing(
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"sync"
"testing"
"time"
@@ -9,6 +8,7 @@ import (
blockchainTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v6/config/features"
@@ -28,7 +28,7 @@ import (
)

func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

genesis, keys := util.DeterministicGenesisState(t, 64)
copiedGen := genesis.Copy()
@@ -180,6 +180,19 @@ func TestService_ReceiveBlock(t *testing.T) {
}
wg.Wait()
}
func TestHandleDA(t *testing.T) {
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{},
},
})
require.NoError(t, err)

s, _ := minimalTestService(t)
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
require.NoError(t, err)
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}

func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, tr := minimalTestService(t,
@@ -215,7 +228,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
}

func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()

genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
@@ -280,23 +293,23 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
func TestService_HasBlock(t *testing.T) {
s, _ := minimalTestService(t)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
if s.HasBlock(t.Context(), r) {
t.Error("Should not have block")
}
wsb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
if !s.HasBlock(context.Background(), r) {
require.NoError(t, s.saveInitSyncBlock(t.Context(), r, wsb))
if !s.HasBlock(t.Context(), r) {
t.Error("Should have block")
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b)
util.SaveBlock(t, t.Context(), s.cfg.BeaconDB, b)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(context.Background(), r))
require.Equal(t, true, s.HasBlock(t.Context(), r))
s.blockBeingSynced.set(r)
require.Equal(t, false, s.HasBlock(context.Background(), r))
require.Equal(t, false, s.HasBlock(t.Context(), r))
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
@@ -305,7 +318,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}

@@ -316,19 +329,19 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {

st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
s.genesisTime = time.Now()

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
s.genesisTime = time.Now()
s.SetGenesisTime(time.Now())

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}

@@ -336,8 +349,9 @@ func TestHandleCaches_EnablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

helpers.ClearCache()
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Expanding committee cache size")
}
@@ -443,7 +457,7 @@ func Test_executePostFinalizationTasks(t *testing.T) {

headState, err := util.NewBeaconStateElectra()
require.NoError(t, err)
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
finalizedStRoot, err := headState.HashTreeRoot(t.Context())
require.NoError(t, err)

genesis := util.NewBeaconBlock()

beacon-chain/blockchain/receive_data_column.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package blockchain

import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/pkg/errors"
)

// ReceiveDataColumns receives a batch of data columns.
func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save(dataColumnSidecars); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

return nil
}

// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

return nil
}
@@ -12,10 +12,10 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -30,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -46,26 +47,29 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
slasherEnabled bool
lcStore *lightClient.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
}

// config options for the service.
@@ -81,7 +85,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
@@ -93,6 +97,7 @@ type config struct {
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
CustodyInfo *peerdas.CustodyInfo
}

// Checker is an interface used to determine if a node is in initial sync
@@ -106,22 +111,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][]bool
// TODO: Separate blobs from data columns
// seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
return
}
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// if idx >= uint64(maxBlobsPerBlock) {
// return
// }

bn.Lock()
seen := bn.seenIndex[root]
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
// TODO: Separate blobs from data columns
// if seen == nil {
// seen = make([]bool, maxBlobsPerBlock)
// }
if seen[idx] {
bn.Unlock()
return
@@ -132,7 +141,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}

@@ -142,12 +153,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c
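
seenIndex now maps roots to a fixed-size bool array rather than a slice, which is presumably why the nil check above could be commented out: looking up a missing key yields a usable zero-valued array. The trade-off is Go's array value semantics, so an updated copy has to be written back into the map. A minimal sketch, where 128 is only a stand-in for fieldparams.NumberOfColumns:

package main

import "fmt"

const numberOfColumns = 128 // stand-in for fieldparams.NumberOfColumns

func main() {
	seen := make(map[[32]byte][numberOfColumns]bool)
	var root [32]byte

	// Arrays are value types: this copies the entry out of the map, and a
	// missing key returns a zero-valued array, so no nil/allocation check.
	cols := seen[root]
	cols[3] = true

	// The mutation touched only the local copy; store it back explicitly.
	seen[root] = cols
	fmt.Println(seen[root][3]) // true
}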

@@ -173,17 +187,20 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -202,17 +219,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()

if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
if err := s.startFromExecutionChain(); err != nil {
log.Fatal(err)
}
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
@@ -261,8 +270,11 @@ func (s *Service) Status() error {

// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if state.IsNil(saved) {
return errors.New("Last finalized state at startup is nil")
}
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.genesisTime = saved.GenesisTime()
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())

originRoot, err := s.originRootFromSavedState(s.ctx)
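
This hunk is one instance of the recurring change in this comparison: genesisTime is carried as a time.Time end to end (here, saved.GenesisTime() now appears to return a time.Time directly), instead of a uint64 unix timestamp converted at each call site. Roughly the before/after in isolation; the mainnet timestamp is only for illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Old shape: the state stored seconds since the unix epoch, and every
	// consumer converted with time.Unix plus a lint suppression for the cast.
	var genesisUnix uint64 = 1606824023 // mainnet beacon genesis, for illustration
	oldStyle := time.Unix(int64(genesisUnix), 0)

	// New shape: keep time.Time everywhere and compare or subtract directly.
	genesis := oldStyle
	fmt.Println(genesis.UTC(), time.Since(genesis) > 0)
}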
@@ -352,62 +364,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return nil
}

func (s *Service) startFromExecutionChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured execution chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
}()

return nil
}

// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.WithError(err).Fatal("Could not initialize beacon chain")
}
// We start a counter to genesis, if needed.
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.WithError(err).Fatal("Could not hash tree root genesis state")
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
}

// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -418,7 +374,7 @@ func (s *Service) initializeBeaconChain(
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
unixTime := uint64(genesisTime.Unix())

genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
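
The Truncate(time.Second) added above keeps the in-memory time.Time consistent with the whole-second unix value written into the genesis state. A quick illustration of the semantics:

package main

import (
	"fmt"
	"time"
)

func main() {
	tm := time.Date(2020, 12, 1, 12, 0, 23, 500_000_000, time.UTC) // 23.5s past the minute
	fmt.Println(tm.Truncate(time.Second))                          // 2020-12-01 12:00:23 +0000 UTC: sub-second part dropped
	fmt.Println(tm.Unix() == tm.Truncate(time.Second).Unix())      // true: Unix() already ignores nanoseconds
}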
@@ -479,7 +435,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)

if err := s.setHead(&head{
genesisBlkRoot,

@@ -1,11 +1,9 @@
package blockchain

import (
"context"
"io"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -18,15 +16,12 @@ func init() {
}

func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}

@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -42,7 +43,7 @@ import (
)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
ctx := t.Context()
var web3Service *execution.Service
var err error
srv, endpoint, err := mockExecution.SetupRPCServer()
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)

depositCache, err := depositsnapshot.New()
require.NoError(t, err)

web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")

attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)

depositCache, err := depositsnapshot.New()
require.NoError(t, err)

fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Safe a state in stategen to purposes of testing a service stop / shutdown.
@@ -97,7 +101,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccessor{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
@@ -115,7 +119,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {

func TestChainStartStop_Initialized(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -127,7 +131,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -152,7 +156,7 @@ func TestChainStartStop_Initialized(t *testing.T) {

func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -164,7 +168,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -184,7 +188,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
func TestChainService_InitializeBeaconChain(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
ctx := t.Context()

bc := setupBeaconChain(t, beaconDB)
var err error
@@ -226,7 +230,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
}

func TestChainService_CorrectGenesisRoots(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -238,7 +242,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -295,7 +299,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
r, err := c.HeadRoot(t.Context())
require.NoError(t, err)
if !bytes.Equal(headRoot[:], r) {
t.Error("head slot incorrect")
@@ -345,12 +349,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}

func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
}
ctx := t.Context()
s := testServiceWithDB(t)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
@@ -370,11 +370,8 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}

func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := t.Context()
s := testServiceWithDB(t)
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -391,48 +388,21 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
}

func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
}
s := testServiceWithDB(t)
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}

func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
@@ -447,11 +417,8 @@ func BenchmarkHasBlockDB(b *testing.B) {
}

func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -587,7 +554,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}

@@ -38,7 +38,7 @@ func (s *Service) startupHeadRoot() [32]byte {
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
@@ -46,7 +46,7 @@ func (s *Service) startupHeadRoot() [32]byte {
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
@@ -64,16 +64,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
}
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
if err != nil {
log.WithError(err).Error("could not get head block, starting with finalized block as head")
log.WithError(err).Error("Could not get head block, starting with finalized block as head")
return nil
}
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
return nil
}
chain, err := s.buildForkchoiceChain(s.ctx, blk)
if err != nil {
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
s.cfg.ForkChoiceStore.Lock()
@@ -170,6 +170,6 @@ func (s *Service) setupForkchoiceCheckpoints() error {
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil
}

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
})

st, _ := util.DeterministicGenesisState(t, 64)

@@ -4,6 +4,7 @@ import (
"context"
"sync"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/async/event"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
@@ -20,8 +21,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"google.golang.org/protobuf/proto"
@@ -46,6 +52,11 @@ type mockBroadcaster struct {
broadcastCalled bool
}

type mockAccessor struct {
mockBroadcaster
p2pTesting.MockPeerManager
}
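
The config field changed from p2p.Broadcaster to the wider p2p.Accessor, and the test mock above satisfies it purely by embedding a broadcaster mock and a peer-manager mock. A toy sketch of that composition; the interface shapes below are stand-ins, the real p2p interfaces are larger:

package main

import "fmt"

type Broadcaster interface{ Broadcast(msg string) }
type PeerManager interface{ PeerCount() int }

// Accessor-style composite: anything implementing both smaller interfaces
// satisfies it automatically.
type Accessor interface {
	Broadcaster
	PeerManager
}

type mockBroadcaster struct{ called bool }

func (m *mockBroadcaster) Broadcast(string) { m.called = true }

type mockPeerManager struct{}

func (mockPeerManager) PeerCount() int { return 0 }

// Embedding promotes both method sets, so mockAccessor satisfies Accessor
// with no new code of its own.
type mockAccessor struct {
	mockBroadcaster
	mockPeerManager
}

func main() {
	var a Accessor = &mockAccessor{}
	a.Broadcast("hello")
	fmt.Println(a.PeerCount()) // 0
}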

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -66,6 +77,21 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}

func (mb *mockBroadcaster) BroadcastLightClientOptimisticUpdate(_ context.Context, _ interfaces.LightClientOptimisticUpdate) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfaces.LightClientFinalityUpdate) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

@@ -85,9 +111,11 @@ type testServiceRequirements struct {
}

func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
ctx := t.Context()
genesis := time.Now().Add(-1 * 4 * time.Duration(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second) // Genesis was 4 epochs ago.
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
fcs.SetGenesisTime(genesis)
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
@@ -121,9 +149,12 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithDepositCache(dc),
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
WithP2PBroadcaster(&mockAccessor{}),
WithLightClientStore(&lightclient.Store{}),
WithGenesisTime(genesis),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

@@ -75,6 +75,7 @@ type ChainService struct {
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
}
@@ -554,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
if err != nil {
logrus.WithError(err).Error("could not update head")
logrus.WithError(err).Error("Could not update head")
}
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
if err != nil {
logrus.WithError(err).Error("could not insert node to forkchoice")
logrus.WithError(err).Error("Could not insert node to forkchoice")
}
}

@@ -640,7 +641,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
}

// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
if s.ForkChoiceStore != nil {
s.ForkChoiceStore.SetGenesisTime(timestamp)
}
@@ -715,6 +716,17 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}

// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
c.DataColumns = append(c.DataColumns, dc)
return nil
}

// ReceiveDataColumns implements the same method in chain service
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
return nil
}

// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

@@ -1,11 +1,8 @@
package blockchain

import (
"context"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -18,11 +15,8 @@ import (
)

func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)

b := util.NewBeaconBlock()
b.Block.Slot = 1792480
util.SaveBlock(t, context.Background(), beaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

@@ -69,17 +63,17 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := testServiceWithDB(t)
beaconDB := s.cfg.BeaconDB
util.SaveBlock(t, t.Context(), beaconDB, b)
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,
}
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
s.cfg.WeakSubjectivityCheckpt = tt.checkpt
s.wsVerifier = wv
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)
} else {

@@ -24,7 +24,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")

// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
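
Returning the v1.BlobsBundler interface instead of the concrete *v1.BlobsBundle lets SubmitBlindedBlock hand back more than one bundle shape behind a single signature, which is useful when a newer fork changes what the builder returns. The types below are toy stand-ins to show the idea, not Prysm's real definitions:

package main

import "fmt"

// BlobsBundler abstracts over bundle versions.
type BlobsBundler interface {
	GetBlobs() [][]byte
}

type BlobsBundle struct{ Blobs [][]byte } // legacy shape

func (b *BlobsBundle) GetBlobs() [][]byte { return b.Blobs }

type BlobsBundleV2 struct{ Blobs, Proofs [][]byte } // hypothetical newer shape

func (b *BlobsBundleV2) GetBlobs() [][]byte { return b.Blobs }

// submit can return either version; callers only depend on the interface.
func submit() BlobsBundler {
	return &BlobsBundleV2{Blobs: [][]byte{{0x01}}}
}

func main() {
	fmt.Println(len(submit().GetBlobs())) // 1
}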
@@ -87,7 +87,7 @@ func (s *Service) Stop() error {
}

// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()

@@ -1,7 +1,6 @@
package builder

import (
"context"
"testing"
"time"

@@ -15,19 +14,19 @@ import (
)

func Test_NewServiceWithBuilder(t *testing.T) {
s, err := NewService(context.Background(), WithBuilderClient(&buildertesting.MockClient{}))
s, err := NewService(t.Context(), WithBuilderClient(&buildertesting.MockClient{}))
require.NoError(t, err)
assert.Equal(t, true, s.Configured())
}

func Test_NewServiceWithoutBuilder(t *testing.T) {
s, err := NewService(context.Background())
s, err := NewService(t.Context())
require.NoError(t, err)
assert.Equal(t, false, s.Configured())
}

func Test_RegisterValidator(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
db := dbtesting.SetupDB(t)
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
@@ -40,7 +39,7 @@ func Test_RegisterValidator(t *testing.T) {
}

func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
@@ -55,16 +54,16 @@ func Test_RegisterValidator_WithCache(t *testing.T) {
}

func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
s, err := NewService(t.Context())
require.NoError(t, err)
assert.Equal(t, false, s.Configured())

_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
_, err = s.GetHeader(t.Context(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)

_, _, err = s.SubmitBlindedBlock(context.Background(), nil)
_, _, err = s.SubmitBlindedBlock(t.Context(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)

err = s.RegisterValidator(context.Background(), nil)
err = s.RegisterValidator(t.Context(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
}

@@ -46,7 +46,7 @@ func (s *MockBuilderService) Configured() bool {
}

// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
switch b.Version() {
case version.Bellatrix:
w, err := blocks.WrappedExecutionPayload(s.Payload)
beacon-chain/cache/committee.go (vendored, 11 lines)
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
mathutil "github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -104,11 +105,16 @@ func (c *CommitteeCache) CompressCommitteeCache() {
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
func (c *CommitteeCache) Committee(ctx context.Context, slot primitives.Slot, seed [32]byte, index primitives.CommitteeIndex) ([]primitives.ValidatorIndex, error) {
ctx, span := trace.StartSpan(ctx, "committeeCache.Committee")
defer span.End()
span.SetAttributes(trace.Int64Attribute("slot", int64(slot)), trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- OK for tracing.

if err := c.checkInProgress(ctx, seed); err != nil {
return nil, err
}

obj, exists := c.CommitteeCache.Get(key(seed))
span.SetAttributes(trace.BoolAttribute("cache_hit", exists))
if exists {
CommitteeCacheHit.Inc()
} else {
@@ -157,11 +163,14 @@ func (c *CommitteeCache) AddCommitteeShuffledList(ctx context.Context, committee

// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndices(ctx context.Context, seed [32]byte) ([]primitives.ValidatorIndex, error) {
ctx, span := trace.StartSpan(ctx, "committeeCache.ActiveIndices")
defer span.End()

if err := c.checkInProgress(ctx, seed); err != nil {
return nil, err
}
obj, exists := c.CommitteeCache.Get(key(seed))

span.SetAttributes(trace.BoolAttribute("cache_hit", exists))
if exists {
CommitteeCacheHit.Inc()
} else {
beacon-chain/cache/committee_fuzz_test.go (vendored, 9 lines)
@@ -3,7 +3,6 @@
package cache

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -30,8 +29,8 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {

for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
require.NoError(t, err)
}

@@ -45,9 +44,9 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {

for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))

indices, err := cache.ActiveIndices(context.Background(), c.Seed)
indices, err := cache.ActiveIndices(t.Context(), c.Seed)
require.NoError(t, err)
assert.DeepEqual(t, c.SortedIndices, indices)
}
beacon-chain/cache/committee_test.go (vendored, 28 lines)
@@ -44,15 +44,15 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {

    slot := params.BeaconConfig().SlotsPerEpoch
    committeeIndex := primitives.CommitteeIndex(1)
    indices, err := cache.Committee(context.Background(), slot, item.Seed, committeeIndex)
    indices, err := cache.Committee(t.Context(), slot, item.Seed, committeeIndex)
    require.NoError(t, err)
    if indices != nil {
        t.Error("Expected committee not to exist in empty cache")
    }
    require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
    require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))

    wantedIndex := primitives.CommitteeIndex(0)
    indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
    indices, err = cache.Committee(t.Context(), slot, item.Seed, wantedIndex)
    require.NoError(t, err)

    start, end := startEndIndices(item, uint64(wantedIndex))

@@ -63,15 +63,15 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
    cache := NewCommitteesCache()

    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
    indices, err := cache.ActiveIndices(context.Background(), item.Seed)
    indices, err := cache.ActiveIndices(t.Context(), item.Seed)
    require.NoError(t, err)
    if indices != nil {
        t.Error("Expected committee not to exist in empty cache")
    }

    require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
    require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))

    indices, err = cache.ActiveIndices(context.Background(), item.Seed)
    indices, err = cache.ActiveIndices(t.Context(), item.Seed)
    require.NoError(t, err)
    assert.DeepEqual(t, item.SortedIndices, indices)
}
@@ -80,13 +80,13 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
    cache := NewCommitteesCache()

    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
    count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
    count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
    require.NoError(t, err)
    assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")

    require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
    require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))

    count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
    count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
    require.NoError(t, err)
    assert.Equal(t, len(item.SortedIndices), count)
}
@@ -100,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
    for i := start; i < end; i++ {
        s := []byte(strconv.Itoa(i))
        item := &Committees{Seed: bytesutil.ToBytes32(s)}
        require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
        require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
    }

    k := cache.CommitteeCache.Keys()
@@ -130,7 +130,7 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
    assert.NoError(t, err)
    _ = cache.CommitteeCache.Add(key, comms)

    _, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
    _, err = cache.Committee(t.Context(), 0, seed, math.MaxUint64) // Overflow!
    require.NotNil(t, err, "Did not fail as expected")
}

@@ -138,15 +138,15 @@ func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
    cache := NewCommitteesCache()

    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
    count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
    count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
    require.NoError(t, err)
    assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")

    cancelled, cancel := context.WithCancel(context.Background())
    cancelled, cancel := context.WithCancel(t.Context())
    cancel()
    require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)

    count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
    count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
    require.NoError(t, err)
    assert.Equal(t, 0, count)
}
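TestCommitteeCache_DoesNothingWhenCancelledContext still builds an explicitly canceled context, but now derives it from t.Context() rather than context.Background(), so even the canceled child stays tied to the test's lifetime. A sketch of that shape, with addItem as a hypothetical stand-in for AddCommitteeShuffledList:

package cache_test

import (
    "context"
    "errors"
    "testing"
)

// addItem must refuse to mutate anything once its context is canceled,
// mirroring the contract the test above checks.
func addItem(ctx context.Context, store map[string]int, k string) error {
    if err := ctx.Err(); err != nil {
        return err // context.Canceled here
    }
    store[k] = 1
    return nil
}

func TestDoesNothingWhenCancelled(t *testing.T) {
    store := map[string]int{}
    cancelled, cancel := context.WithCancel(t.Context())
    cancel()

    if err := addItem(cancelled, store, "A"); !errors.Is(err, context.Canceled) {
        t.Fatalf("want context.Canceled, got %v", err)
    }
    if len(store) != 0 {
        t.Fatal("store mutated despite cancelled context")
    }
}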
@@ -2,7 +2,6 @@ package depositsnapshot

import (
    "bytes"
    "context"
    "fmt"
    "math/big"
    "testing"
@@ -55,7 +54,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
    }
    dc.deposits = deposits

    d := dc.AllDeposits(context.Background(), nil)
    d := dc.AllDeposits(t.Context(), nil)
    assert.Equal(t, len(deposits), len(d))
}

@@ -95,7 +94,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
    }
    dc.deposits = deposits

    d := dc.AllDeposits(context.Background(), big.NewInt(11))
    d := dc.AllDeposits(t.Context(), big.NewInt(11))
    assert.Equal(t, 5, len(d))
}

@@ -127,7 +126,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                DepositRoot: wantedRoot,
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(13))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(13))
        assert.Equal(t, 4, int(n))
        require.DeepEqual(t, wantedRoot, root[:])
    })
@@ -143,7 +142,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                DepositRoot: wantedRoot,
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
        assert.Equal(t, 1, int(n))
        require.DeepEqual(t, wantedRoot, root[:])
    })
@@ -169,7 +168,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                Deposit: &ethpb.Deposit{},
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
        assert.Equal(t, 2, int(n))
        require.DeepEqual(t, wantedRoot, root[:])
    })
@@ -185,7 +184,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                DepositRoot: wantedRoot,
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(7))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(7))
        assert.Equal(t, 0, int(n))
        require.DeepEqual(t, params.BeaconConfig().ZeroHash, root)
    })
@@ -201,7 +200,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                DepositRoot: wantedRoot,
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
        assert.Equal(t, 1, int(n))
        require.DeepEqual(t, wantedRoot, root[:])
    })
@@ -237,7 +236,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
                Deposit: &ethpb.Deposit{},
            },
        }
        n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(9))
        n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(9))
        assert.Equal(t, 3, int(n))
        require.DeepEqual(t, wantedRoot, root[:])
    })
@@ -288,10 +287,10 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
            },
        },
    }
    dc.InsertDepositContainers(context.Background(), ctrs)
    dc.InsertDepositContainers(t.Context(), ctrs)

    pk1 := bytesutil.PadTo([]byte("pk1"), 48)
    dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
    dep, blkNum := dc.DepositByPubkey(t.Context(), pk1)

    if dep == nil || !bytes.Equal(dep.Data.PublicKey, pk1) {
        t.Error("Returned wrong deposit")
@@ -303,7 +302,7 @@
func TestInsertDepositContainers_NotNil(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)
    dc.InsertDepositContainers(context.Background(), nil)
    dc.InsertDepositContainers(t.Context(), nil)
    assert.DeepEqual(t, []*ethpb.DepositContainer{}, dc.deposits)
}

@@ -359,10 +358,10 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
        err = dc.finalizedDeposits.depositTree.pushLeaf(root)
        require.NoError(t, err)
    }
    err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
    require.NoError(t, err)

    cachedDeposits, err := dc.FinalizedDeposits(context.Background())
    cachedDeposits, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    require.NotNil(t, cachedDeposits, "Deposits not cached")
    assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
@@ -425,15 +424,15 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
        err = dc.finalizedDeposits.Deposits().Insert(root[:], 0)
        require.NoError(t, err)
    }
    err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
    require.NoError(t, err)

    err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
    require.NoError(t, err)

    dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)

    cachedDeposits, err := dc.FinalizedDeposits(context.Background())
    cachedDeposits, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    require.NotNil(t, cachedDeposits, "Deposits not cached")
    require.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex())
@@ -459,10 +458,10 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
    require.NoError(t, err)

    cachedDeposits, err := dc.FinalizedDeposits(context.Background())
    cachedDeposits, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    require.NotNil(t, cachedDeposits, "Deposits not cached")
    assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex())
@@ -509,10 +508,10 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
    }
    dc.deposits = finalizedDeposits

    err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
    require.NoError(t, err)

    cachedDeposits, err := dc.FinalizedDeposits(context.Background())
    cachedDeposits, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    require.NotNil(t, cachedDeposits, "Deposits not cached")
    assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
@@ -592,14 +591,14 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
    }
    dc.deposits = finalizedDeposits

    err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
    require.NoError(t, err)

    // Reinsert finalized deposits with a lower index.
    err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
    require.NoError(t, err)

    cachedDeposits, err := dc.FinalizedDeposits(context.Background())
    cachedDeposits, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    require.NotNil(t, cachedDeposits, "Deposits not cached")
    assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex())
@@ -670,10 +669,10 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
        Index: 3,
        DepositRoot: rootCreator('D'),
    })
    err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
    require.NoError(t, err)

    deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
    deps := dc.NonFinalizedDeposits(t.Context(), 1, nil)
    assert.Equal(t, 2, len(deps))
}

@@ -681,7 +680,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits_Nil(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deps := dc.NonFinalizedDeposits(context.Background(), 0, nil)
    deps := dc.NonFinalizedDeposits(t.Context(), 0, nil)
    assert.Equal(t, 0, len(deps))
}

@@ -740,10 +739,10 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
        Index: 3,
        DepositRoot: rootCreator('D'),
    })
    err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
    require.NoError(t, err)

    deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
    deps := dc.NonFinalizedDeposits(t.Context(), 1, big.NewInt(10))
    assert.Equal(t, 1, len(deps))
}

@@ -785,21 +784,21 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
    assert.NoError(t, err)

    // Perform this in a nonsensical ordering
    err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 3, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
    require.NoError(t, err)

    // Mimic finalized deposit trie fetch.
    fd, err := dc.FinalizedDeposits(context.Background())
    fd, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
    deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
    insertIndex := fd.MerkleTrieIndex() + 1

    for _, dep := range deps {
@@ -810,24 +809,24 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
        }
        insertIndex++
    }
    err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 6, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 6, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 9, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 9, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 12, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 12, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
    require.NoError(t, err)
    err = dc.InsertFinalizedDeposits(context.Background(), 14, [32]byte{}, 0)
    err = dc.InsertFinalizedDeposits(t.Context(), 14, [32]byte{}, 0)
    require.NoError(t, err)

    fd, err = dc.FinalizedDeposits(context.Background())
    fd, err = dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
    deps = dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
    insertIndex = fd.MerkleTrieIndex() + 1

    for _, dep := range dc.deposits {
@@ -888,9 +887,9 @@ func TestMin(t *testing.T) {
    }
    dc.deposits = finalizedDeposits

    fd, err := dc.FinalizedDeposits(context.Background())
    fd, err := dc.FinalizedDeposits(t.Context())
    require.NoError(t, err)
    deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(16))
    deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), big.NewInt(16))
    insertIndex := fd.MerkleTrieIndex() + 1
    for _, dep := range deps {
        depHash, err := dep.Data.HashTreeRoot()
@@ -908,28 +907,28 @@ func TestDepositMap_WorksCorrectly(t *testing.T) {
    require.NoError(t, err)

    pk0 := bytesutil.PadTo([]byte("pk0"), 48)
    dep, _ := dc.DepositByPubkey(context.Background(), pk0)
    dep, _ := dc.DepositByPubkey(t.Context(), pk0)
    var nilDep *ethpb.Deposit
    assert.DeepEqual(t, nilDep, dep)

    dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
    assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))
    assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 0, [32]byte{}))

    dep, _ = dc.DepositByPubkey(context.Background(), pk0)
    dep, _ = dc.DepositByPubkey(t.Context(), pk0)
    assert.NotEqual(t, nilDep, dep)
    assert.Equal(t, uint64(1000), dep.Data.Amount)

    dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
    assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))
    assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 1, [32]byte{}))

    // Make sure we have the same deposit returned over here.
    dep, _ = dc.DepositByPubkey(context.Background(), pk0)
    dep, _ = dc.DepositByPubkey(t.Context(), pk0)
    assert.NotEqual(t, nilDep, dep)
    assert.Equal(t, uint64(1000), dep.Data.Amount)

    // Make sure another key doesn't work.
    pk1 := bytesutil.PadTo([]byte("pk1"), 48)
    dep, _ = dc.DepositByPubkey(context.Background(), pk1)
    dep, _ = dc.DepositByPubkey(t.Context(), pk1)
    assert.DeepEqual(t, nilDep, dep)
}
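TestDepositMap_WorksCorrectly pins down a contract worth noting: after a second deposit is inserted for the same pubkey, DepositByPubkey still returns the first one (the amount stays 1000). A toy sketch of that first-match lookup over insertion-ordered deposits, with hypothetical names rather than the cache's internals:

package main

import "fmt"

type deposit struct {
    pubkey string
    amount uint64
}

// firstByPubkey scans deposits in insertion order and returns the first
// match, mirroring the "same deposit returned" assertion in the test above.
func firstByPubkey(deposits []deposit, pk string) (deposit, bool) {
    for _, d := range deposits {
        if d.pubkey == pk {
            return d, true
        }
    }
    return deposit{}, false
}

func main() {
    ds := []deposit{{"pk0", 1000}, {"pk0", 10000}}
    d, ok := firstByPubkey(ds, "pk0")
    fmt.Println(ok, d.amount) // true 1000: the first deposit wins
}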
@@ -1,7 +1,6 @@
package depositsnapshot

import (
    "context"
    "math/big"
    "testing"

@@ -13,14 +12,14 @@ var _ PendingDepositsFetcher = (*Cache)(nil)

func TestInsertPendingDeposit_OK(t *testing.T) {
    dc := Cache{}
    dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
    dc.InsertPendingDeposit(t.Context(), &ethpb.Deposit{}, 111, 100, [32]byte{})

    assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}

func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
    dc := Cache{}
    dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
    dc.InsertPendingDeposit(t.Context(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})

    assert.Equal(t, 0, len(dc.pendingDeposits))
}
@@ -34,13 +33,13 @@ func TestPendingDeposits_OK(t *testing.T) {
        {Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
    }

    deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
    deposits := dc.PendingDeposits(t.Context(), big.NewInt(4))
    expected := []*ethpb.Deposit{
        {Proof: [][]byte{[]byte("A")}},
        {Proof: [][]byte{[]byte("B")}},
    }
    assert.DeepSSZEqual(t, expected, deposits)

    all := dc.PendingDeposits(context.Background(), nil)
    all := dc.PendingDeposits(t.Context(), nil)
    assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
}
@@ -1,7 +1,6 @@
package depositsnapshot

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -22,7 +21,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 0)
    dc.PrunePendingDeposits(t.Context(), 0)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
@@ -46,7 +45,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 6)
    dc.PrunePendingDeposits(t.Context(), 6)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
@@ -65,7 +64,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 10)
    dc.PrunePendingDeposits(t.Context(), 10)
    expected = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
@@ -86,7 +85,7 @@ func TestPruneAllPendingDeposits(t *testing.T) {
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PruneAllPendingDeposits(context.Background())
    dc.PruneAllPendingDeposits(t.Context())
    expected := []*ethpb.DepositContainer{}

    assert.DeepEqual(t, expected, dc.pendingDeposits)
@@ -128,10 +127,10 @@ func TestPruneProofs_Ok(t *testing.T) {
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
        assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 1))
    require.NoError(t, dc.PruneProofs(t.Context(), 1))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -173,10 +172,10 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
        assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 2))
    require.NoError(t, dc.PruneProofs(t.Context(), 2))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
}
@@ -217,10 +216,10 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
        assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 99))
    require.NoError(t, dc.PruneProofs(t.Context(), 99))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -264,10 +263,10 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
        assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 4))
    require.NoError(t, dc.PruneProofs(t.Context(), 4))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -311,10 +310,10 @@ func TestPruneAllProofs(t *testing.T) {
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
        assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    dc.PruneAllProofs(context.Background())
    dc.PruneAllProofs(t.Context())

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
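The PruneProofs tests above fix the pruning semantics: PruneProofs(ctx, n) clears the Merkle proof of every stored container with index up to and including n, and PruneAllProofs clears them all. A hypothetical helper illustrating those semantics (proofs are only needed until a deposit is finalized, so dropping them reclaims memory; this is a sketch, not the cache's implementation):

package depositsnapshot

import (
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

// pruneProofsSketch clears proofs for all containers with Index <= untilIndex,
// assuming the slice is kept sorted by index as the cache maintains it.
func pruneProofsSketch(deposits []*ethpb.DepositContainer, untilIndex int64) {
    for _, ctr := range deposits {
        if ctr.Index > untilIndex {
            break
        }
        ctr.Deposit.Proof = nil
    }
}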
beacon-chain/cache/registration_test.go (vendored, 5 changed lines)
@@ -1,7 +1,6 @@
package cache

import (
    "context"
    "testing"
    "time"

@@ -24,7 +23,7 @@ func TestRegistrationCache(t *testing.T) {
        Timestamp: uint64(time.Now().Unix()),
        Pubkey: pubkey,
    }
    cache.UpdateIndexToRegisteredMap(context.Background(), m)
    cache.UpdateIndexToRegisteredMap(t.Context(), m)
    reg, err := cache.RegistrationByIndex(validatorIndex)
    require.NoError(t, err)
    require.Equal(t, string(reg.Pubkey), string(pubkey))
@@ -38,7 +37,7 @@ func TestRegistrationCache(t *testing.T) {
        Timestamp: uint64(time.Now().Unix()),
        Pubkey: pubkey,
    }
    cache.UpdateIndexToRegisteredMap(context.Background(), m)
    cache.UpdateIndexToRegisteredMap(t.Context(), m)
    reg, err := cache.RegistrationByIndex(validatorIndex2)
    require.NoError(t, err)
    require.Equal(t, string(reg.Pubkey), string(pubkey))
beacon-chain/cache/skip_slot_cache_test.go (vendored, 5 changed lines)
@@ -1,7 +1,6 @@
package cache_test

import (
    "context"
    "sync"
    "testing"

@@ -14,7 +13,7 @@ import (
)

func TestSkipSlotCache_RoundTrip(t *testing.T) {
    ctx := context.Background()
    ctx := t.Context()
    c := cache.NewSkipSlotCache()

    r := [32]byte{'a'}
@@ -38,7 +37,7 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
}

func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
    ctx := context.Background()
    ctx := t.Context()
    c := cache.NewSkipSlotCache()

    r := [32]byte{'a'}
beacon-chain/cache/sync_committee.go (vendored, 2 changed lines)
@@ -178,7 +178,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
    s.lock.Lock()
    defer s.lock.Unlock()
    if clearCount != s.cleared.Load() {
        log.Warn("cache rotated during async committee update operation - abandoning cache update")
        log.Warn("Cache rotated during async committee update operation - abandoning cache update")
        return nil
    }
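The sync_committee.go hunk makes two things visible: the log message is re-capitalized, evidently to match the codebase's capitalized log-message style, and the surrounding code uses a generation-counter guard that abandons an async cache update if the cache rotated while the update was being computed. A self-contained sketch of that guard, with hypothetical names:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// generationCache sketches the pattern above: an atomic counter is bumped on
// every rotation, and a slow async update compares its pre-computation
// snapshot against the counter before committing its result.
type generationCache struct {
    mu      sync.Mutex
    cleared atomic.Uint64
    data    map[string][]int
}

func (c *generationCache) rotate() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.data = map[string][]int{}
    c.cleared.Add(1)
}

func (c *generationCache) asyncUpdate(key string, compute func() []int) bool {
    gen := c.cleared.Load() // snapshot before the expensive work
    val := compute()        // runs outside the lock

    c.mu.Lock()
    defer c.mu.Unlock()
    if gen != c.cleared.Load() {
        return false // cache rotated mid-update; drop the stale result
    }
    c.data[key] = val
    return true
}

func main() {
    c := &generationCache{data: map[string][]int{}}
    ok := c.asyncUpdate("root", func() []int { c.rotate(); return []int{1} })
    fmt.Println(ok) // false: the rotation invalidated the in-flight update
}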
@@ -81,6 +81,7 @@ go_test(
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/fuzz:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time:go_default_library",
@@ -1,7 +1,6 @@
package altair_test

import (
    "context"
    "fmt"
    "testing"

@@ -19,9 +18,10 @@ import (
    "github.com/OffchainLabs/prysm/v6/math"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
    "github.com/OffchainLabs/prysm/v6/testing/fuzz"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    fuzz "github.com/google/gofuzz"
    gofuzz "github.com/google/gofuzz"
    "github.com/prysmaticlabs/go-bitfield"
)

@@ -50,7 +50,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
    )
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
}

@@ -81,7 +81,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
    )
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
}

@@ -110,13 +110,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
    want := "source check point not equal to current justified checkpoint"
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
    b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
    b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
    wsb, err = blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
}

@@ -151,14 +151,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
    want := "source check point not equal to previous justified checkpoint"
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
    b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
    b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
    b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
    wsb, err = blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, want, err)
}

@@ -190,7 +190,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
    expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
    wsb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
    _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
    require.ErrorContains(t, expected, err)
}

@@ -214,7 +214,7 @@ func TestProcessAttestations_OK(t *testing.T) {
        cfc.Root = mockRoot[:]
        require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))

        committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
        committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0)
        require.NoError(t, err)
        attestingIndices, err := attestation.AttestingIndices(att, committee)
        require.NoError(t, err)
@@ -235,7 +235,7 @@ func TestProcessAttestations_OK(t *testing.T) {
        require.NoError(t, err)
        wsb, err := blocks.NewSignedBeaconBlock(block)
        require.NoError(t, err)
        _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
        _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
        require.NoError(t, err)
    })
    t.Run("post-Electra", func(t *testing.T) {
@@ -260,7 +260,7 @@ func TestProcessAttestations_OK(t *testing.T) {
        cfc.Root = mockRoot[:]
        require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))

        committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
        committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0)
        require.NoError(t, err)
        attestingIndices, err := attestation.AttestingIndices(att, committee)
        require.NoError(t, err)
@@ -281,7 +281,7 @@ func TestProcessAttestations_OK(t *testing.T) {
        require.NoError(t, err)
        wsb, err := blocks.NewSignedBeaconBlock(block)
        require.NoError(t, err)
        _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
        _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
        require.NoError(t, err)
    })
}
@@ -313,13 +313,13 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {

    b, err := helpers.TotalActiveBalance(beaconState)
    require.NoError(t, err)
    beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att, b)
    beaconState, err = altair.ProcessAttestationNoVerifySignature(t.Context(), beaconState, att, b)
    require.NoError(t, err)

    p, err := beaconState.CurrentEpochParticipation()
    require.NoError(t, err)

    committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
    committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
    require.NoError(t, err)
    indices, err := attestation.AttestingIndices(att, committee)
    require.NoError(t, err)
@@ -458,7 +458,7 @@ func TestValidatorFlag_Add_ExceedsLength(t *testing.T) {
}

func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    st := &ethpb.BeaconStateAltair{}
    b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
    for i := 0; i < 10000; i++ {
@@ -474,10 +474,11 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
        }
        wsb, err := blocks.NewSignedBeaconBlock(b)
        require.NoError(t, err)
        r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb.Block())
        r, err := altair.ProcessAttestationsNoVerifySignature(t.Context(), s, wsb.Block())
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, s, b)
        }
        fuzz.FreeMemory(i)
    }
}

@@ -555,10 +556,10 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {

    b, err := helpers.TotalActiveBalance(beaconState)
    require.NoError(t, err)
    st, err := altair.SetParticipationAndRewardProposer(context.Background(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
    st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
    require.NoError(t, err)

    i, err := helpers.BeaconProposerIndex(context.Background(), st)
    i, err := helpers.BeaconProposerIndex(t.Context(), st)
    require.NoError(t, err)
    b, err = beaconState.BalanceAtIndex(i)
    require.NoError(t, err)
@@ -661,8 +662,8 @@ func TestRewardProposer(t *testing.T) {
        {rewardNumerator: 1000000000000, want: 34234377253},
    }
    for _, test := range tests {
        require.NoError(t, altair.RewardProposer(context.Background(), beaconState, test.rewardNumerator))
        i, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
        require.NoError(t, altair.RewardProposer(t.Context(), beaconState, test.rewardNumerator))
        i, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
        require.NoError(t, err)
        b, err := beaconState.BalanceAtIndex(i)
        require.NoError(t, err)
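The import hunks in these fuzz tests rename the google/gofuzz alias from fuzz to gofuzz, freeing the name fuzz for Prysm's own testing/fuzz helpers, whose fuzz.FreeMemory(i) call is added inside each 10000-iteration loop to keep memory bounded. A sketch of the aliasing pattern as it appears in these files (the fuzzOnce helper is hypothetical):

package altair_test

import (
    // The internal helpers keep the short name...
    "github.com/OffchainLabs/prysm/v6/testing/fuzz"
    // ...so the third-party fuzzer gets an explicit alias.
    gofuzz "github.com/google/gofuzz"
)

func fuzzOnce(obj interface{}, iteration int) {
    gofuzz.NewWithSeed(0).Fuzz(obj) // third-party: randomly populate the struct
    fuzz.FreeMemory(iteration)      // internal: periodic GC relief in long loops
}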
@@ -1,7 +1,6 @@
package altair_test

import (
    "context"
    "math"
    "testing"

@@ -26,7 +25,7 @@
func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
    beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

@@ -34,7 +33,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
    for i := range syncBits {
        syncBits[i] = 0xff
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
    require.NoError(t, err)
    ps := slots.PrevSlot(beaconState.Slot())
    pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -55,7 +54,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
    }

    var reward uint64
    beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
    beaconState, reward, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
    require.NoError(t, err)
    assert.Equal(t, uint64(72192), reward)

@@ -77,7 +76,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
    require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex])

    // Proposer should be more profitable than rest of the sync committee
    proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
    proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
    require.NoError(t, err)
    require.Equal(t, true, balances[proposerIndex] > balances[indices[0]])

@@ -102,7 +101,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
    beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

@@ -110,7 +109,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
    for i := range syncBits {
        syncBits[i] = 0xAA
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
    require.NoError(t, err)
    ps := slots.PrevSlot(beaconState.Slot())
    pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -130,14 +129,14 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
        SyncCommitteeSignature: aggregatedSig,
    }

    _, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
    _, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
    require.ErrorContains(t, "invalid sync committee signature", err)
}

func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
    beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

@@ -145,7 +144,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
    for i := range syncBits {
        syncBits[i] = 0xAA
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
    require.NoError(t, err)
    ps := slots.PrevSlot(beaconState.Slot())
    pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -167,7 +166,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
        SyncCommitteeSignature: aggregatedSig,
    }

    _, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
    _, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
    require.NoError(t, err)
}

@@ -175,7 +174,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
    beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    committeeKeys := committee.Pubkeys
    committeeKeys[1] = committeeKeys[0]
@@ -192,7 +191,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
        SyncCommitteeBits: syncBits,
    }
    require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
    st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
    st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate)
    require.NoError(t, err)
    require.Equal(t, 511, len(votedKeys))
    require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
@@ -203,7 +202,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
    beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

@@ -215,7 +214,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
        SyncCommitteeBits: syncBits,
    }

    st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
    st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate)
    require.NoError(t, err)
    votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
    for _, key := range votedKeys {
@@ -228,7 +227,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
    committeeKeys := currentSyncCommittee.Pubkeys
    balances := st.Balances()

    proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
    proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
    require.NoError(t, err)

    for i := 0; i < len(syncBits); i++ {
@@ -254,7 +253,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
func Test_VerifySyncCommitteeSig(t *testing.T) {
    beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, beaconState.SetSlot(1))
    committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

@@ -262,7 +261,7 @@ func Test_VerifySyncCommitteeSig(t *testing.T) {
    for i := range syncBits {
        syncBits[i] = 0xff
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
    require.NoError(t, err)
    ps := slots.PrevSlot(beaconState.Slot())
    pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -1,21 +1,21 @@
package altair_test

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/fuzz"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    fuzz "github.com/google/gofuzz"
    gofuzz "github.com/google/gofuzz"
)

func TestFuzzProcessDeposits_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconStateAltair{}
    deposits := make([]*ethpb.Deposit, 100)
    ctx := context.Background()
    ctx := t.Context()
    for i := 0; i < 10000; i++ {
        fuzzer.Fuzz(state)
        for i := range deposits {
@@ -27,14 +27,15 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
        }
        fuzz.FreeMemory(i)
    }
}

func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconStateAltair{}
    deposit := &ethpb.Deposit{}
    ctx := context.Background()
    ctx := t.Context()

    for i := 0; i < 10000; i++ {
        fuzzer.Fuzz(state)
@@ -45,14 +46,15 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
        }
        fuzz.FreeMemory(i)
    }
}

func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconState{}
    deposit := &ethpb.Deposit{}
    ctx := context.Background()
    ctx := t.Context()

    for i := 0; i < 10000; i++ {
        fuzzer.Fuzz(state)
@@ -63,11 +65,12 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
        }
        fuzz.FreeMemory(i)
    }
}

func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconState{}
    deposit := &ethpb.Deposit{}

@@ -80,11 +83,12 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
        }
        fuzz.FreeMemory(i)
    }
}

func TestFuzzProcessDeposit_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconStateAltair{}
    deposit := &ethpb.Deposit{}

@@ -97,5 +101,6 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
        }
        fuzz.FreeMemory(i)
    }
}
@@ -1,7 +1,6 @@
package altair_test

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -42,7 +41,7 @@ func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
        },
    })
    require.NoError(t, err)
    newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
    newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
    require.NoError(t, err, "Expected block deposits to process correctly")
    require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
}
@@ -70,7 +69,7 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
        },
    })
    require.NoError(t, err)
    newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0]})
    newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0]})
    require.NoError(t, err, "Expected block deposits to process correctly")
    if newState.Balances()[1] != dep[0].Data.Amount {
        t.Errorf(
@@ -127,7 +126,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
        },
    })
    require.NoError(t, err)
    newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
    newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{deposit})
    require.NoError(t, err, "Process deposit failed")
    require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
}
@@ -256,7 +255,7 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
        },
    })
    require.NoError(t, err)
    newState, err := altair.ProcessPreGenesisDeposits(context.Background(), beaconState, dep)
    newState, err := altair.ProcessPreGenesisDeposits(t.Context(), beaconState, dep)
    require.NoError(t, err, "Expected invalid block deposit to be ignored without error")

    _, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey))
@@ -370,6 +369,6 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
    })
    require.NoError(t, err)
    want := "deposit root did not verify"
    _, err = altair.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
    _, err = altair.ProcessDeposits(t.Context(), beaconState, b.Block.Body.Deposits)
    assert.ErrorContains(t, want, err)
}
@@ -1,7 +1,6 @@
package altair

import (
    "context"
    "math"
    "testing"

@@ -32,7 +31,7 @@ func TestInitializeEpochValidators_Ok(t *testing.T) {
        InactivityScores: []uint64{0, 1, 2, 3},
    })
    require.NoError(t, err)
    v, b, err := InitializePrecomputeValidators(context.Background(), s)
    v, b, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    assert.DeepEqual(t, &precompute.Validator{
        IsSlashed: true,
@@ -74,7 +73,7 @@ func TestInitializeEpochValidators_Overflow(t *testing.T) {
        InactivityScores: []uint64{0, 1},
    })
    require.NoError(t, err)
    _, _, err = InitializePrecomputeValidators(context.Background(), s)
    _, _, err = InitializePrecomputeValidators(t.Context(), s)
    require.ErrorContains(t, "could not read every validator: addition overflows", err)
}

@@ -84,16 +83,16 @@ func TestInitializeEpochValidators_BadState(t *testing.T) {
        InactivityScores: []uint64{},
    })
    require.NoError(t, err)
    _, _, err = InitializePrecomputeValidators(context.Background(), s)
    _, _, err = InitializePrecomputeValidators(t.Context(), s)
    require.ErrorContains(t, "num of validators is different than num of inactivity scores", err)
}

func TestProcessEpochParticipation(t *testing.T) {
    s, err := testState()
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    require.DeepEqual(t, &precompute.Validator{
        IsActiveCurrentEpoch: true,
@@ -169,9 +168,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
        InactivityScores: []uint64{0, 0, 0},
    })
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), st)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), st)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), st, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), st, balance, validators)
    require.NoError(t, err)
    require.DeepEqual(t, &precompute.Validator{
        IsActiveCurrentEpoch: false,
@@ -209,9 +208,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
func TestAttestationsDelta(t *testing.T) {
    s, err := testState()
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    deltas, err := AttestationsDelta(s, balance, validators)
    require.NoError(t, err)
@@ -247,9 +246,9 @@
func TestAttestationsDeltaBellatrix(t *testing.T) {
    s, err := testStateBellatrix()
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    deltas, err := AttestationsDelta(s, balance, validators)
    require.NoError(t, err)
@@ -285,9 +284,9 @@
func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
    s, err := testState()
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
    require.NoError(t, err)
@@ -324,9 +323,9 @@
func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
    s, err := testState()
    require.NoError(t, err)
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    sCopy := s.Copy()
    s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
@@ -352,11 +351,11 @@ func TestProcessInactivityScores_CanProcessInactivityLeak(t *testing.T) {
    defaultScore := uint64(5)
    require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
    require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().MinEpochsToInactivityPenalty+2)))
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    s, _, err = ProcessInactivityScores(context.Background(), s, validators)
    s, _, err = ProcessInactivityScores(t.Context(), s, validators)
    require.NoError(t, err)
    inactivityScores, err := s.InactivityScores()
    require.NoError(t, err)
@@ -373,11 +372,11 @@ func TestProcessInactivityScores_GenesisEpoch(t *testing.T) {
    defaultScore := uint64(10)
    require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
    require.NoError(t, s.SetSlot(params.BeaconConfig().GenesisSlot))
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
    validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
    require.NoError(t, err)
    validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
    validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
    require.NoError(t, err)
    s, _, err = ProcessInactivityScores(context.Background(), s, validators)
    s, _, err = ProcessInactivityScores(t.Context(), s, validators)
    require.NoError(t, err)
    inactivityScores, err := s.InactivityScores()
    require.NoError(t, err)
@@ -392,11 +391,11 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) {
    require.NoError(t, err)
    defaultScore := uint64(5)
    require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
    validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
|
||||
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
|
||||
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
|
||||
require.NoError(t, err)
|
||||
inactivityScores, err := s.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
@@ -410,9 +409,9 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) {
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
|
||||
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
|
||||
@@ -429,9 +428,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
|
||||
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
_, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
_, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
|
||||
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
|
||||
@@ -442,7 +441,7 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defaultScore := uint64(5)
|
||||
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
|
||||
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
|
||||
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
|
||||
// v0 is eligible (not active previous epoch, slashed and not withdrawable)
|
||||
@@ -463,9 +462,9 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) {
|
||||
// v3 is eligible (active previous epoch)
|
||||
validators[3].IsActivePrevEpoch = true
|
||||
|
||||
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
|
||||
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
|
||||
require.NoError(t, err)
|
||||
inactivityScores, err := s.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
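
The change repeated throughout these hunks swaps context.Background() for t.Context(). A minimal sketch of the difference, assuming Go 1.24+ where testing.T gained a Context method; doWork is a hypothetical stand-in for any context-aware call under test:

	package example_test

	import (
		"context"
		"testing"
	)

	// doWork is a hypothetical stand-in for a context-aware function.
	func doWork(ctx context.Context) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			return nil
		}
	}

	func TestWithScopedContext(t *testing.T) {
		// t.Context() (Go 1.24+) is canceled just before the test's Cleanup
		// functions run, so work started with it cannot outlive the test;
		// context.Background() carried no such per-test lifetime.
		if err := doWork(t.Context()); err != nil {
			t.Fatal(err)
		}
	}

Test files that no longer reference the context package directly can then drop the import, which is what the @@ -1,7 +1,6 @@ hunks below do.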

@@ -1,7 +1,6 @@
 package altair_test

 import (
-	"context"
 	"fmt"
 	"math"
 	"testing"
@@ -29,7 +28,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
 		BodyRoot: bytesutil.PadTo([]byte{'c'}, 32),
 	}
 	require.NoError(t, s.SetLatestBlockHeader(h))
-	postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s)
+	postState, err := altair.ProcessSyncCommitteeUpdates(t.Context(), s)
 	require.NoError(t, err)
 	current, err := postState.CurrentSyncCommittee()
 	require.NoError(t, err)
@@ -38,7 +37,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
 	require.DeepEqual(t, current, next)

 	require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
-	postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
+	postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s)
 	require.NoError(t, err)
 	c, err := postState.CurrentSyncCommittee()
 	require.NoError(t, err)
@@ -48,7 +47,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
 	require.DeepEqual(t, next, n)

 	require.NoError(t, s.SetSlot(primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1))
-	postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
+	postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s)
 	require.NoError(t, err)
 	c, err = postState.CurrentSyncCommittee()
 	require.NoError(t, err)
@@ -61,7 +60,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
 	// Test boundary condition.
 	slot := params.BeaconConfig().SlotsPerEpoch * primitives.Slot(time.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod)
 	require.NoError(t, s.SetSlot(slot))
-	boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s)
+	boundaryCommittee, err := altair.NextSyncCommittee(t.Context(), s)
 	require.NoError(t, err)
 	require.DeepNotEqual(t, boundaryCommittee, n)
 }

@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
 			valIdx: 2,
 			st: genState(1),
 			want: 0,
-			errString: "validator index 2 does not exist",
+			errString: "index 2 out of bounds",
 		},
 		{
 			name: "active balance is 32eth",
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
 			valIdx: 2,
 			activeBalance: 1,
 			want: 0,
-			errString: "validator index 2 does not exist",
+			errString: "index 2 out of bounds",
 		},
 		{
 			name: "active balance is 1",
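
Both base-reward hunks above only retarget the expected error text, from "validator index 2 does not exist" to "index 2 out of bounds", tracking a change in the underlying state accessor's message. A hedged sketch of the assertion pattern those errString fields feed into; fetchBalance is a hypothetical stand-in for the accessor, and only require.ErrorContains is taken from this repo:

	package altair_test

	import (
		"fmt"
		"testing"

		"github.com/OffchainLabs/prysm/v6/testing/require"
	)

	// fetchBalance is a hypothetical stand-in for the bounds-checked
	// state accessor whose error text changed.
	func fetchBalance(balances []uint64, idx int) (uint64, error) {
		if idx >= len(balances) {
			return 0, fmt.Errorf("index %d out of bounds", idx)
		}
		return balances[idx], nil
	}

	func TestErrString(t *testing.T) {
		_, err := fetchBalance([]uint64{0, 0}, 2)
		// Note the argument order: the wanted substring comes before the
		// error, matching the errString usage in the hunks above.
		require.ErrorContains(t, "index 2 out of bounds", err)
	}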

@@ -217,15 +217,15 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
 // ValidateSyncMessageTime validates sync message to ensure that the provided slot is valid.
 // Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
 func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
-	if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
+	if err := slots.ValidateClock(slot, genesisTime); err != nil {
 		return err
 	}
-	messageTime, err := slots.ToTime(uint64(genesisTime.Unix()), slot)
+	messageTime, err := slots.StartTime(genesisTime, slot)
 	if err != nil {
 		return err
 	}
-	currentSlot := slots.Since(genesisTime)
-	slotStartTime, err := slots.ToTime(uint64(genesisTime.Unix()), currentSlot)
+	currentSlot := slots.CurrentSlot(genesisTime)
+	slotStartTime, err := slots.StartTime(genesisTime, currentSlot)
 	if err != nil {
 		return err
 	}
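
The hunk above removes the uint64(genesisTime.Unix()) round-trips in favor of slots helpers that take the genesis time.Time directly. A hedged sketch of the resulting call pattern; the helper signatures are assumed from the + lines of this diff, and slotWindow itself is illustrative:

	package example

	import (
		"time"

		"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
		"github.com/OffchainLabs/prysm/v6/time/slots"
	)

	// slotWindow returns the start time of a message's slot and of the
	// node's current slot, mirroring ValidateSyncMessageTime's new body.
	func slotWindow(slot primitives.Slot, genesis time.Time) (msgTime, curStart time.Time, err error) {
		// Reject slots that are impossible given the local clock.
		if err = slots.ValidateClock(slot, genesis); err != nil {
			return
		}
		// Slot start as time.Time, no Unix-seconds conversion needed.
		if msgTime, err = slots.StartTime(genesis, slot); err != nil {
			return
		}
		cur := slots.CurrentSlot(genesis)
		curStart, err = slots.StartTime(genesis, cur)
		return
	}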

@@ -1,7 +1,6 @@
 package altair_test

 import (
-	"context"
 	"testing"
 	"time"

@@ -97,7 +96,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
 		t.Run(version.String(v), func(t *testing.T) {
 			helpers.ClearCache()
 			st := getState(t, tt.args.validatorCount, v)
-			got, err := altair.NextSyncCommitteeIndices(context.Background(), st)
+			got, err := altair.NextSyncCommitteeIndices(t.Context(), st)
 			if tt.wantErr {
 				require.ErrorContains(t, tt.errString, err)
 			} else {
@@ -129,18 +128,18 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
 	}

 	st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
-	got1, err := altair.NextSyncCommitteeIndices(context.Background(), st)
+	got1, err := altair.NextSyncCommitteeIndices(t.Context(), st)
 	require.NoError(t, err)
 	require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))
-	got2, err := altair.NextSyncCommitteeIndices(context.Background(), st)
+	got2, err := altair.NextSyncCommitteeIndices(t.Context(), st)
 	require.NoError(t, err)
 	require.DeepNotEqual(t, got1, got2)
 	require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
-	got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
+	got2, err = altair.NextSyncCommitteeIndices(t.Context(), st)
 	require.NoError(t, err)
 	require.DeepNotEqual(t, got1, got2)
 	require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
-	got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
+	got2, err = altair.NextSyncCommitteeIndices(t.Context(), st)
 	require.NoError(t, err)
 	require.DeepNotEqual(t, got1, got2)
 }
@@ -206,7 +205,7 @@ func TestSyncCommittee_CanGet(t *testing.T) {
 			if !tt.wantErr {
 				require.NoError(t, tt.args.state.SetSlot(primitives.Slot(tt.args.epoch)*params.BeaconConfig().SlotsPerEpoch))
 			}
-			got, err := altair.NextSyncCommittee(context.Background(), tt.args.state)
+			got, err := altair.NextSyncCommittee(t.Context(), tt.args.state)
 			if tt.wantErr {
 				require.ErrorContains(t, tt.errString, err)
 			} else {
@@ -270,7 +269,7 @@ func TestValidateNilSyncContribution(t *testing.T) {
 func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
 	helpers.ClearCache()
 	st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
-	com, err := altair.NextSyncCommittee(context.Background(), st)
+	com, err := altair.NextSyncCommittee(t.Context(), st)
 	require.NoError(t, err)
 	sub, err := altair.SyncSubCommitteePubkeys(com, 0)
 	require.NoError(t, err)

@@ -1,7 +1,6 @@
 package altair_test

 import (
-	"context"
 	"testing"

 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -13,7 +12,7 @@ import (
 func TestProcessEpoch_CanProcess(t *testing.T) {
 	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
 	require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
-	err := altair.ProcessEpoch(context.Background(), st)
+	err := altair.ProcessEpoch(t.Context(), st)
 	require.NoError(t, err)
 	require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")

@@ -45,7 +44,7 @@ func TestProcessEpoch_CanProcess(t *testing.T) {
 func TestProcessEpoch_CanProcessBellatrix(t *testing.T) {
 	st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
 	require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
-	err := altair.ProcessEpoch(context.Background(), st)
+	err := altair.ProcessEpoch(t.Context(), st)
 	require.NoError(t, err)
 	require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")

Some files were not shown because too many files have changed in this diff.